From 20e3c295aea5992a4d8ea8e7e45ed9c9d6639813 Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Fri, 24 Mar 2023 17:18:24 +0800 Subject: [PATCH 0001/1397] Optimize constant label pair adding with relabel.Replace Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel.go | 15 +++++++++ model/relabel/relabel_test.go | 60 +++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index d29c3d07ae..83b1a91474 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -267,6 +267,17 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + if !model.LabelName(cfg.TargetLabel).IsValid() || cfg.Replacement == "" { + lb.Del(cfg.TargetLabel) + } else { + lb.Set(cfg.TargetLabel, cfg.Replacement) + } + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. 
if indexes == nil { @@ -316,3 +327,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 517b9b8223..d3815afe62 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -15,6 +15,7 @@ package relabel import ( "fmt" + "sort" "testing" "github.com/prometheus/common/model" @@ -850,3 +851,62 @@ func BenchmarkRelabel(b *testing.B) { }) } } + +func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { + cfgs := []*Config{} + for k, v := range map[string]string{ + "wwwwww": "wwwwww", + "xxxxxxxxx": "xxxxxxxxx", + "yyyyyyyyyyyy": "yyyyyyyyyyyy", + "${0}": "dropped", + "dropped": "${0}", + } { + cfgs = append(cfgs, &Config{ + Action: DefaultRelabelConfig.Action, + Separator: DefaultRelabelConfig.Separator, + Regex: DefaultRelabelConfig.Regex, + TargetLabel: k, + Replacement: v, + }) + } + expectLset := labels.Labels{ + labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, + labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, + labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, + labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, + labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, + labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, + labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, + labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, + labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, + labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, + labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, + labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, + labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, + labels.Label{Name: "wwwwww", Value: "wwwwww"}, + labels.Label{Name: "xxxxxxxxx", Value: "xxxxxxxxx"}, + labels.Label{Name: "yyyyyyyyyyyy", Value: "yyyyyyyyyyyy"}, + } + sort.Sort(expectLset) + + for i := 0; i < b.N; i++ { + lset := labels.Labels{ + 
labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, + labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, + labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, + labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, + labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, + labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, + labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, + labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, + labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, + labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, + labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, + labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, + labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, + } + actual, _ := Process(lset, cfgs...) + var _ = actual + // require.Equal(b, actual, expectLset) + } +} From 2d0d3333712e596ec2f1f4e4596fdb7ae0466e6f Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Sat, 25 Mar 2023 10:42:20 +0800 Subject: [PATCH 0002/1397] Fix lint issue Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index d3815afe62..bd51b9771e 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -905,8 +905,7 @@ func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, } - actual, _ := Process(lset, cfgs...) - var _ = actual + _, _ = Process(lset, cfgs...) 
// require.Equal(b, actual, expectLset) } } From 1601b2a79e0116f9d3a3e30915a6899e73c96feb Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Wed, 29 Mar 2023 11:20:59 +0800 Subject: [PATCH 0003/1397] check new line in target Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel.go | 5 +++++ model/relabel/relabel_test.go | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 83b1a91474..7607138b5c 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -269,6 +269,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { case Replace: // Fast path to add or delete label pair. if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !containsNewLine(cfg.TargetLabel) && !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { if !model.LabelName(cfg.TargetLabel).IsValid() || cfg.Replacement == "" { lb.Del(cfg.TargetLabel) @@ -331,3 +332,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { func varInRegexTemplate(template string) bool { return strings.Contains(template, "$") } + +func containsNewLine(s string) bool { + return strings.Contains(s, "\r\n") || strings.Contains(s, "\n") +} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index bd51b9771e..86844cf280 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -858,6 +858,8 @@ func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { "wwwwww": "wwwwww", "xxxxxxxxx": "xxxxxxxxx", "yyyyyyyyyyyy": "yyyyyyyyyyyy", + "new\nline1": "dropped", + "new\r\nline2": "dropped", "${0}": "dropped", "dropped": "${0}", } { @@ -905,7 +907,7 @@ func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, } - _, _ = Process(lset, cfgs...) - // require.Equal(b, actual, expectLset) + actual, _ := Process(lset, cfgs...) 
+ require.Equal(b, actual, expectLset) } } From b3b5c0022e5c05088733519f104db2c8b5c54974 Mon Sep 17 00:00:00 2001 From: "Xiaochao Dong (@damnever)" Date: Mon, 25 Dec 2023 15:14:25 +0800 Subject: [PATCH 0004/1397] Simplify the logic as per the comments Signed-off-by: Xiaochao Dong (@damnever) --- model/relabel/relabel.go | 11 +---- model/relabel/relabel_test.go | 89 +++++++++++------------------------ 2 files changed, 29 insertions(+), 71 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 7607138b5c..d169ed2f22 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -269,13 +269,8 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { case Replace: // Fast path to add or delete label pair. if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && - !containsNewLine(cfg.TargetLabel) && !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { - if !model.LabelName(cfg.TargetLabel).IsValid() || cfg.Replacement == "" { - lb.Del(cfg.TargetLabel) - } else { - lb.Set(cfg.TargetLabel, cfg.Replacement) - } + lb.Set(cfg.TargetLabel, cfg.Replacement) break } @@ -332,7 +327,3 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { func varInRegexTemplate(template string) bool { return strings.Contains(template, "$") } - -func containsNewLine(s string) bool { - return strings.Contains(s, "\r\n") || strings.Contains(s, "\n") -} diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 86844cf280..7652798f55 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -15,7 +15,6 @@ package relabel import ( "fmt" - "sort" "testing" "github.com/prometheus/common/model" @@ -838,6 +837,34 @@ func BenchmarkRelabel(b *testing.B) { "__scrape_timeout__", "10s", "job", "kubernetes-pods"), }, + { + name: "static label pair", + config: ` + - replacement: wwwwww + target_label: wwwwww + - replacement: yyyyyyyyyyyy + target_label: xxxxxxxxx + - replacement: 
xxxxxxxxx + target_label: yyyyyyyyyyyy + - source_labels: ["something"] + target_label: with_source_labels + replacement: value + - replacement: dropped + target_label: ${0} + - replacement: ${0} + target_label: dropped`, + lbls: labels.FromStrings( + "abcdefg01", "hijklmn1", + "abcdefg02", "hijklmn2", + "abcdefg03", "hijklmn3", + "abcdefg04", "hijklmn4", + "abcdefg05", "hijklmn5", + "abcdefg06", "hijklmn6", + "abcdefg07", "hijklmn7", + "abcdefg08", "hijklmn8", + "job", "foo", + ), + }, } for i := range tests { err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs) @@ -851,63 +878,3 @@ func BenchmarkRelabel(b *testing.B) { }) } } - -func BenchmarkRelabel_ReplaceAddLabel(b *testing.B) { - cfgs := []*Config{} - for k, v := range map[string]string{ - "wwwwww": "wwwwww", - "xxxxxxxxx": "xxxxxxxxx", - "yyyyyyyyyyyy": "yyyyyyyyyyyy", - "new\nline1": "dropped", - "new\r\nline2": "dropped", - "${0}": "dropped", - "dropped": "${0}", - } { - cfgs = append(cfgs, &Config{ - Action: DefaultRelabelConfig.Action, - Separator: DefaultRelabelConfig.Separator, - Regex: DefaultRelabelConfig.Regex, - TargetLabel: k, - Replacement: v, - }) - } - expectLset := labels.Labels{ - labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, - labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, - labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, - labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, - labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, - labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, - labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, - labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, - labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, - labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, - labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, - labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, - labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, - labels.Label{Name: "wwwwww", Value: "wwwwww"}, - labels.Label{Name: "xxxxxxxxx", Value: "xxxxxxxxx"}, - 
labels.Label{Name: "yyyyyyyyyyyy", Value: "yyyyyyyyyyyy"}, - } - sort.Sort(expectLset) - - for i := 0; i < b.N; i++ { - lset := labels.Labels{ - labels.Label{Name: "abcdefg01", Value: "hijklmn1"}, - labels.Label{Name: "abcdefg02", Value: "hijklmn2"}, - labels.Label{Name: "abcdefg03", Value: "hijklmn3"}, - labels.Label{Name: "abcdefg04", Value: "hijklmn4"}, - labels.Label{Name: "abcdefg05", Value: "hijklmn5"}, - labels.Label{Name: "abcdefg06", Value: "hijklmn6"}, - labels.Label{Name: "abcdefg07", Value: "hijklmn7"}, - labels.Label{Name: "abcdefg08", Value: "hijklmn8"}, - labels.Label{Name: "abcdefg09", Value: "hijklmn9"}, - labels.Label{Name: "abcdefg10", Value: "hijklmn10"}, - labels.Label{Name: "abcdefg11", Value: "hijklmn11"}, - labels.Label{Name: "abcdefg12", Value: "hijklmn12"}, - labels.Label{Name: "abcdefg13", Value: "hijklmn13"}, - } - actual, _ := Process(lset, cfgs...) - require.Equal(b, actual, expectLset) - } -} From 34230bb17293535b65a53e33cbaf626663a679c3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 27 Nov 2023 17:20:27 +0000 Subject: [PATCH 0005/1397] tsdb/wlog: close segment files sooner 'defer' runs at the end of the whole function; we should close each segment file as soon as we finished reading it. 
Signed-off-by: Bryan Boreham --- tsdb/wlog/watcher.go | 5 +++-- tsdb/wlog/watcher_test.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 1c76e38877..56cd0cc4e2 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -730,10 +730,11 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err if err != nil { return fmt.Errorf("unable to open segment: %w", err) } - defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) { + err = readFn(w, r, index, false) + sr.Close() + if err != nil && !errors.Is(err, io.EOF) { return fmt.Errorf("readSegment: %w", err) } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index b30dce91a3..2686d3bc9a 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -218,11 +218,11 @@ func TestTailSamples(t *testing.T) { for i := first; i <= last; i++ { segment, err := OpenReadSegment(SegmentName(watcher.walDir, i)) require.NoError(t, err) - defer segment.Close() reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment) // Use tail true so we can ensure we got the right number of samples. 
watcher.readSegment(reader, i, true) + require.NoError(t, segment.Close()) } expectedSeries := seriesCount From b5389192583bac5171716e6467a370f29bda3c43 Mon Sep 17 00:00:00 2001 From: tyltr Date: Wed, 31 Jan 2024 20:27:23 +0800 Subject: [PATCH 0006/1397] remove redundant code Signed-off-by: tyltr --- discovery/legacymanager/manager_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 6fbecabc2a..7675321689 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -1091,7 +1091,6 @@ func TestCoordinationWithReceiver(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.title, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() From dfabda75072ac62dd0d2a381146b5a86936288c9 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sat, 17 Feb 2024 12:17:38 +0100 Subject: [PATCH 0007/1397] Add scaffold for new Mantine-based UI Signed-off-by: Julius Volz --- web/ui/mantine-ui/.eslintrc.cjs | 18 + web/ui/mantine-ui/.gitignore | 24 + web/ui/mantine-ui/README.md | 30 + web/ui/mantine-ui/index.html | 13 + web/ui/mantine-ui/package-lock.json | 4029 ++++++++++++++++++ web/ui/mantine-ui/package.json | 33 + web/ui/mantine-ui/postcss.config.cjs | 14 + web/ui/mantine-ui/public/prometheus-logo.svg | 50 + web/ui/mantine-ui/src/App.tsx | 9 + web/ui/mantine-ui/src/index.css | 68 + web/ui/mantine-ui/src/main.tsx | 10 + web/ui/mantine-ui/src/vite-env.d.ts | 1 + web/ui/mantine-ui/tsconfig.json | 25 + web/ui/mantine-ui/tsconfig.node.json | 11 + web/ui/mantine-ui/vite.config.ts | 7 + 15 files changed, 4342 insertions(+) create mode 100644 web/ui/mantine-ui/.eslintrc.cjs create mode 100644 web/ui/mantine-ui/.gitignore create mode 100644 web/ui/mantine-ui/README.md create mode 100644 web/ui/mantine-ui/index.html create mode 100644 web/ui/mantine-ui/package-lock.json create mode 100644 
web/ui/mantine-ui/package.json create mode 100644 web/ui/mantine-ui/postcss.config.cjs create mode 100644 web/ui/mantine-ui/public/prometheus-logo.svg create mode 100644 web/ui/mantine-ui/src/App.tsx create mode 100644 web/ui/mantine-ui/src/index.css create mode 100644 web/ui/mantine-ui/src/main.tsx create mode 100644 web/ui/mantine-ui/src/vite-env.d.ts create mode 100644 web/ui/mantine-ui/tsconfig.json create mode 100644 web/ui/mantine-ui/tsconfig.node.json create mode 100644 web/ui/mantine-ui/vite.config.ts diff --git a/web/ui/mantine-ui/.eslintrc.cjs b/web/ui/mantine-ui/.eslintrc.cjs new file mode 100644 index 0000000000..d6c9537953 --- /dev/null +++ b/web/ui/mantine-ui/.eslintrc.cjs @@ -0,0 +1,18 @@ +module.exports = { + root: true, + env: { browser: true, es2020: true }, + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended', + 'plugin:react-hooks/recommended', + ], + ignorePatterns: ['dist', '.eslintrc.cjs'], + parser: '@typescript-eslint/parser', + plugins: ['react-refresh'], + rules: { + 'react-refresh/only-export-components': [ + 'warn', + { allowConstantExport: true }, + ], + }, +} diff --git a/web/ui/mantine-ui/.gitignore b/web/ui/mantine-ui/.gitignore new file mode 100644 index 0000000000..a547bf36d8 --- /dev/null +++ b/web/ui/mantine-ui/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/web/ui/mantine-ui/README.md b/web/ui/mantine-ui/README.md new file mode 100644 index 0000000000..0d6babeddb --- /dev/null +++ b/web/ui/mantine-ui/README.md @@ -0,0 +1,30 @@ +# React + TypeScript + Vite + +This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules. 
+ +Currently, two official plugins are available: + +- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh +- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh + +## Expanding the ESLint configuration + +If you are developing a production application, we recommend updating the configuration to enable type aware lint rules: + +- Configure the top-level `parserOptions` property like this: + +```js +export default { + // other rules... + parserOptions: { + ecmaVersion: 'latest', + sourceType: 'module', + project: ['./tsconfig.json', './tsconfig.node.json'], + tsconfigRootDir: __dirname, + }, +} +``` + +- Replace `plugin:@typescript-eslint/recommended` to `plugin:@typescript-eslint/recommended-type-checked` or `plugin:@typescript-eslint/strict-type-checked` +- Optionally add `plugin:@typescript-eslint/stylistic-type-checked` +- Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and add `plugin:react/recommended` & `plugin:react/jsx-runtime` to the `extends` list diff --git a/web/ui/mantine-ui/index.html b/web/ui/mantine-ui/index.html new file mode 100644 index 0000000000..83b4f801ea --- /dev/null +++ b/web/ui/mantine-ui/index.html @@ -0,0 +1,13 @@ + + + + + + + Prometheus Server + + +
+ + + diff --git a/web/ui/mantine-ui/package-lock.json b/web/ui/mantine-ui/package-lock.json new file mode 100644 index 0000000000..47b72006f5 --- /dev/null +++ b/web/ui/mantine-ui/package-lock.json @@ -0,0 +1,4029 @@ +{ + "name": "mantine-ui", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "mantine-ui", + "version": "0.0.0", + "dependencies": { + "@mantine/core": "^7.5.3", + "@mantine/hooks": "^7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@types/react": "^18.2.55", + "@types/react-dom": "^18.2.19", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "@vitejs/plugin-react": "^4.2.1", + "eslint": "^8.56.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.4.35", + "postcss-preset-mantine": "^1.13.0", + "postcss-simple-vars": "^7.0.1", + "typescript": "^5.2.2", + "vite": "^5.1.0" + } + }, + "node_modules/@babel/core": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.9.tgz", + "integrity": "sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==", + "dev": true, + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.9", + "@babel/parser": "^7.23.9", + "@babel/template": "^7.23.9", + "@babel/traverse": "^7.23.9", + "@babel/types": "^7.23.9", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": 
"https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/code-frame": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.23.4", + "chalk": "^2.4.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/compat-data": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", + "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/generator": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz", + "integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.23.6", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-compilation-targets": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", + "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", + "dev": true, + "dependencies": { + 
"@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dev": true, + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-module-imports": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/core/node_modules/@babel/helper-module-transforms": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", + "integrity": "sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-string-parser": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", + "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/core/node_modules/@babel/helper-validator-identifier": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helper-validator-option": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/helpers": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", + "integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==", + "dev": true, + "dependencies": { + "@babel/template": "^7.23.9", + "@babel/traverse": "^7.23.9", + "@babel/types": "^7.23.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/highlight": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/parser": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", + "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", + "dev": true, + "bin": { + 
"parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/template": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", + "integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.23.9", + "@babel/types": "^7.23.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/traverse": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz", + "integrity": "sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.9", + "@babel/types": "^7.23.9", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@babel/types": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", + "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.23.4", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": 
"sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/core/node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/core/node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/core/node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "node_modules/@babel/core/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.22", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", + "integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@babel/core/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/browserslist": { + "version": "4.23.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", + "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/@babel/core/node_modules/caniuse-lite": { + "version": "1.0.30001587", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001587.tgz", + "integrity": "sha512-HMFNotUmLXn71BQxg8cijvqxnIAofforZOwGsxyXJ0qugTdspUF4sPSJ2vhgprHCB996tIDzEq1ubumPDV8ULA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/@babel/core/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": 
"^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/core/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/core/node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/@babel/core/node_modules/electron-to-chromium": { + "version": "1.4.673", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.673.tgz", + "integrity": "sha512-zjqzx4N7xGdl5468G+vcgzDhaHkaYgVcf9MqgexcTqsl2UHSCmOj/Bi3HAprg4BZCpC7HyD8a6nZl6QAZf72gw==", + "dev": true + }, + "node_modules/@babel/core/node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/@babel/core/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + 
"engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/core/node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/@babel/core/node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, 
+ "node_modules/@babel/core/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/@babel/core/node_modules/node-releases": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", + "dev": true + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/core/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/core/node_modules/update-browserslist-db": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + 
"dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/@babel/core/node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.23.3.tgz", + "integrity": "sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self/node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.23.3.tgz", + "integrity": "sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g==", + 
"dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source/node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.9.tgz", + "integrity": "sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.0.tgz", + "integrity": "sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g==", + "dependencies": { + "@floating-ui/utils": "^0.2.1" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.3.tgz", + "integrity": "sha512-RnDthu3mzPlQ31Ss/BTwQ1zjzIhr3lk1gZB1OC56h/1vEtaXkESrOqL5fQVMfXpwGtRwX+YsZBdyHtJMQnkArw==", + "dependencies": { + "@floating-ui/core": "^1.0.0", + "@floating-ui/utils": "^0.2.0" + } + }, + "node_modules/@floating-ui/react": { + 
"version": "0.24.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.24.8.tgz", + "integrity": "sha512-AuYeDoaR8jtUlUXtZ1IJ/6jtBkGnSpJXbGNzokBL87VDJ8opMq1Bgrc0szhK482ReQY6KZsMoZCVSb4xwalkBA==", + "dependencies": { + "@floating-ui/react-dom": "^2.0.1", + "aria-hidden": "^1.2.3", + "tabbable": "^6.0.1" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.8.tgz", + "integrity": "sha512-HOdqOt3R3OGeTKidaLvJKcgg75S6tibQ3Tif4eyd91QnIJWr0NLvoXFpJA/j8HqkFSL68GDca9AuyWEHlhyClw==", + "dependencies": { + "@floating-ui/dom": "^1.6.1" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.1.tgz", + "integrity": "sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==" + }, + "node_modules/@mantine/core": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.5.3.tgz", + "integrity": "sha512-Wvv6DJXI+GX9mmKG5HITTh/24sCZ0RoYQHdTHh0tOfGnEy+RleyhA82UjnMsp0n2NjfCISBwbiKgfya6b2iaFw==", + "dependencies": { + "@floating-ui/react": "^0.24.8", + "clsx": "2.0.0", + "react-number-format": "^5.3.1", + "react-remove-scroll": "^2.5.7", + "react-textarea-autosize": "8.5.3", + "type-fest": "^3.13.1" + }, + "peerDependencies": { + "@mantine/hooks": "7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" + } + }, + "node_modules/@mantine/hooks": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.5.3.tgz", + "integrity": "sha512-mFI448mAs12v8FrgSVhytqlhTVrEjIfd/PqPEfwJu5YcZIq4YZdqpzJIUbANnRrFSvmoQpDb1PssdKx7Ds35hw==", + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": 
"2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__core/node_modules/@babel/helper-string-parser": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", + "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@types/babel__core/node_modules/@babel/helper-validator-identifier": { + "version": 
"7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@types/babel__core/node_modules/@babel/parser": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", + "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", + "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@types/babel__core/node_modules/@babel/types": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", + "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.23.4", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@types/babel__core/node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@types/babel__generator": { + "dev": true + }, + "node_modules/@types/babel__template": { + "dev": true + }, + "node_modules/@types/babel__traverse": { + "dev": true + }, + "node_modules/@types/prop-types": { + "devOptional": true + }, + "node_modules/@types/react": { + "version": "18.2.56", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.56.tgz", + "integrity": 
"sha512-NpwHDMkS/EFZF2dONFQHgkPRwhvgq/OAvIaGQzxGSBmaeR++kTg6njr15Vatz0/2VcCEwJQFi6Jf4Q0qBu0rLA==", + "devOptional": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.19", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", + "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "devOptional": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "dependencies": { + 
"@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/array-union": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/@typescript-eslint/parser/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/minimatch": { + "version": "9.0.3", + "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/scope-manager/node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": 
"^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", 
+ "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { 
+ "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@typescript-eslint/utils/node_modules/@types/semver": { + "version": "7.5.7", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.7.tgz", + "integrity": "sha512-/wdoPq1QqkSj9/QOeKkFquEuPzQbHTWAMPH/PaUMB+JuR31lXhlWXRZ52IpfDYVlDOUBvX09uBrPwxGT1hjNBg==", + "dev": true + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/@typescript-eslint/utils/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": 
"sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + 
"engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.2.1", + "resolved": 
"https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.2.1.tgz", + "integrity": "sha512-oojO9IDc4nCUUi8qIR11KoQm0XFFLIwsRBwHRR4d/88IWghn1y6ckz/bJ8GHDCsYEJee8mDzqtJxh15/cisJNQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.23.5", + "@babel/plugin-transform-react-jsx-self": "^7.23.3", + "@babel/plugin-transform-react-jsx-source": "^7.23.3", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.14.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0" + } + }, + "node_modules/aria-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.3.tgz", + "integrity": "sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/clsx": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", + "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": 
"sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "devOptional": true + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" + }, + "node_modules/eslint": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.56.0.tgz", + "integrity": "sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.56.0", + "@humanwhocodes/config-array": "^0.11.13", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": 
"^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", + "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "dev": true, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.5.tgz", + "integrity": "sha512-D53FYKJa+fDmZMtriODxvhwrO+IOqrxoEo21gMA0sjHdU6dPVH4OhyFip9ypl8HOF5RV5KdTo+rBQLvnY2cO8w==", + "dev": true, + "peerDependencies": { + "eslint": ">=7" + } + }, + "node_modules/eslint/node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": 
"sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint/node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/eslint/node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/@eslint/js": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", + "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/eslint/node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + 
"dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/eslint/node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/eslint/node_modules/@humanwhocodes/object-schema": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", + "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", + "dev": true + }, + "node_modules/eslint/node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true + }, + "node_modules/eslint/node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/eslint/node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + 
"node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/eslint/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/eslint/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/eslint/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/eslint/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/eslint/node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + 
"dev": true + }, + "node_modules/eslint/node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/eslint/node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/eslint/node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/eslint/node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, 
+ "node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/eslint/node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/eslint/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + 
}, + "node_modules/eslint/node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint/node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/eslint/node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/eslint/node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/eslint/node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/eslint/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": 
"^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/eslint/node_modules/flatted": { + "version": "3.2.9", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.9.tgz", + "integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==", + "dev": true + }, + "node_modules/eslint/node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/eslint/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/eslint/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + 
"node": ">=10.13.0" + } + }, + "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/eslint/node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/eslint/node_modules/inherits": { + "version": "2.0.4", + 
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/eslint/node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/eslint/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/eslint/node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/eslint/node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/eslint/node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/eslint/node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/eslint/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + 
"brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint/node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/eslint/node_modules/optionator": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/eslint/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": 
"sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/eslint/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint/node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/eslint/node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/eslint/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": 
"sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/eslint/node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/eslint/node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + 
}, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eslint/node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/eslint/node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/eslint/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": 
"sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/eslint/node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/eslint/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + 
"engines": { + "node": ">=8" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/loose-envify/node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": 
"sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.4.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.35.tgz", + "integrity": "sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": 
"github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", + "dev": true, + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-mixins": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/postcss-mixins/-/postcss-mixins-9.0.4.tgz", + "integrity": "sha512-XVq5jwQJDRu5M1XGkdpgASqLk37OqkH4JCFDXl/Dn7janOJjCTEKL+36cnRVy7bMtoBzALfO7bV7nTIsFnUWLA==", + "dev": true, + "dependencies": { + "fast-glob": "^3.2.11", + "postcss-js": "^4.0.0", + "postcss-simple-vars": "^7.0.0", + "sugarss": "^4.0.1" + }, + "engines": { + "node": ">=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-nested": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", + "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", + "dev": true, + "dependencies": { + "postcss-selector-parser": "^6.0.11" + }, + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-preset-mantine": { + "version": "1.13.0", + "resolved": 
"https://registry.npmjs.org/postcss-preset-mantine/-/postcss-preset-mantine-1.13.0.tgz", + "integrity": "sha512-1bv/mQz2K+/FixIMxYd83BYH7PusDZaI7LpUtKbb1l/5N5w6t1p/V9ONHfRJeeAZyfa6Xc+AtR+95VKdFXRH1g==", + "dev": true, + "dependencies": { + "postcss-mixins": "^9.0.4", + "postcss-nested": "^6.0.1" + }, + "peerDependencies": { + "postcss": ">=8.0.0" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.15", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.15.tgz", + "integrity": "sha512-rEYkQOMUCEMhsKbK66tbEU9QVIxbhN18YiniAwA7XQYTVBqrBy+P2p5JcdqsHgKM2zWylp8d7J6eszocfds5Sw==", + "dev": true, + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-simple-vars": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/postcss-simple-vars/-/postcss-simple-vars-7.0.1.tgz", + "integrity": "sha512-5GLLXaS8qmzHMOjVxqkk1TZPf1jMqesiI7qLhnlyERalG0sMbHIbJqrcnrpmZdKCLglHnRHoEBB61RtGTsj++A==", + "dev": true, + "engines": { + "node": ">=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.2.1" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", 
+ "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-number-format": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.3.1.tgz", + "integrity": "sha512-qpYcQLauIeEhCZUZY9jXZnnroOtdy3jYaS1zQ3M1Sr6r/KMOBEIGNIb7eKT19g2N1wbYgFgvDzs19hw5TrB8XQ==", + "dependencies": { + "prop-types": "^15.7.2" + }, + "peerDependencies": { + "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-refresh": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", + "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.5.7", + "resolved": 
"https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz", + "integrity": "sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==", + "dependencies": { + "react-remove-scroll-bar": "^2.3.4", + "react-style-singleton": "^2.2.1", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.0", + "use-sidecar": "^1.1.2" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.4.tgz", + "integrity": "sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==", + "dependencies": { + "react-style-singleton": "^2.2.1", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", + "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "dependencies": { + "get-nonce": "^1.0.0", + "invariant": "^2.2.4", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-textarea-autosize": { + "version": "8.5.3", + "resolved": 
"https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", + "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", + "dependencies": { + "@babel/runtime": "^7.20.13", + "use-composed-ref": "^1.3.0", + "use-latest": "^1.2.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.6.0", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sugarss": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/sugarss/-/sugarss-4.0.1.tgz", + "integrity": "sha512-WCjS5NfuVJjkQzK10s8WOBY+hhDxxNt/N6ZaGwxFZ+wN3/lKKFSaaKUNecULcTTvE4urLcKaZFQD8vO0mOZujw==", + "dev": true, + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.3.3" + } + }, + "node_modules/tabbable": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", + "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" + }, + "node_modules/to-regex-range": { + 
"version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.2.1.tgz", + "integrity": "sha512-RIYA36cJn2WiH9Hy77hdF9r7oEwxAtB/TS9/S4Qd90Ap4z5FSiin5zEiTL44OII1Y3IIlEvxwxFUVgrHSZ/UpA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/type-fest": { + "version": "3.13.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-3.13.1.tgz", + "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", + "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.1.tgz", + "integrity": "sha512-Lg4Vx1XZQauB42Hw3kK7JM6yjVjgFmFC5/Ab797s79aARomD2nEErc4mCgM8EZrARLmmbWpi5DGCadmK50DcAQ==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + 
"peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-composed-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/use-isomorphic-layout-effect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-latest": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", + "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "dependencies": { + "use-isomorphic-layout-effect": "^1.1.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", + "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + 
"optional": true + } + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true + }, + "node_modules/vite": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.3.tgz", + "integrity": "sha512-UfmUD36DKkqhi/F75RrxvPpry+9+tTkrXfMNZD+SboZqBCMsxKtO52XeGzzuh7ioz+Eo/SYDBbdb0Z7vgcDJew==", + "dev": true, + "dependencies": { + "esbuild": "^0.19.3", + "postcss": "^8.4.35", + "rollup": "^4.2.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/@esbuild/aix-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", + "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", + "integrity": 
"sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", + "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/android-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", + "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/darwin-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", + "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { + "version": "0.19.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", + "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/freebsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", + "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", + "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", + "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", + "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/vite/node_modules/@esbuild/linux-loong64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", + "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", + "cpu": [ + "loong64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-mips64el": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", + "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", + "cpu": [ + "mips64el" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", + "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-riscv64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", + "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-s390x": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", + "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", + "cpu": [ + "s390x" + ], 
+ "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/linux-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", + "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/netbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", + "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/openbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", + "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/sunos-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", + "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", + "integrity": 
"sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", + "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@esbuild/win32-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", + "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite/node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz", + "integrity": "sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-android-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz", + "integrity": "sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.12.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz", + "integrity": "sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-darwin-x64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz", + "integrity": "sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz", + "integrity": "sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz", + "integrity": "sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz", + "integrity": "sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-linux-riscv64-gnu": { + 
"version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz", + "integrity": "sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz", + "integrity": "sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz", + "integrity": "sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz", + "integrity": "sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/vite/node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz", + "integrity": "sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + 
"node_modules/vite/node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz", + "integrity": "sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/vite/node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true + }, + "node_modules/vite/node_modules/esbuild": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + "@esbuild/linux-ia32": "0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" + } + }, + "node_modules/vite/node_modules/fsevents": { + 
"version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/vite/node_modules/rollup": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.0.tgz", + "integrity": "sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==", + "dev": true, + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.12.0", + "@rollup/rollup-android-arm64": "4.12.0", + "@rollup/rollup-darwin-arm64": "4.12.0", + "@rollup/rollup-darwin-x64": "4.12.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.0", + "@rollup/rollup-linux-arm64-gnu": "4.12.0", + "@rollup/rollup-linux-arm64-musl": "4.12.0", + "@rollup/rollup-linux-riscv64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-musl": "4.12.0", + "@rollup/rollup-win32-arm64-msvc": "4.12.0", + "@rollup/rollup-win32-ia32-msvc": "4.12.0", + "@rollup/rollup-win32-x64-msvc": "4.12.0", + "fsevents": "~2.3.2" + } + } + } +} diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json new file mode 100644 index 0000000000..fe4d234498 --- /dev/null +++ b/web/ui/mantine-ui/package.json @@ -0,0 +1,33 @@ +{ + "name": "mantine-ui", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview" + }, + "dependencies": { + "@mantine/core": "^7.5.3", + "@mantine/hooks": "^7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@types/react": "^18.2.55", + "@types/react-dom": "^18.2.19", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "@vitejs/plugin-react": "^4.2.1", + "eslint": "^8.56.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.4.35", + "postcss-preset-mantine": "^1.13.0", + "postcss-simple-vars": "^7.0.1", + "typescript": "^5.2.2", + "vite": "^5.1.0" + } +} diff --git a/web/ui/mantine-ui/postcss.config.cjs b/web/ui/mantine-ui/postcss.config.cjs new file mode 100644 index 0000000000..bfba0ddfae --- /dev/null +++ b/web/ui/mantine-ui/postcss.config.cjs @@ -0,0 +1,14 @@ +module.exports = { + plugins: { + 'postcss-preset-mantine': {}, + 'postcss-simple-vars': { + variables: { + 'mantine-breakpoint-xs': '36em', + 'mantine-breakpoint-sm': '48em', + 'mantine-breakpoint-md': '62em', + 'mantine-breakpoint-lg': '75em', + 'mantine-breakpoint-xl': '88em', + }, + }, + }, +}; diff --git a/web/ui/mantine-ui/public/prometheus-logo.svg b/web/ui/mantine-ui/public/prometheus-logo.svg new file mode 100644 index 0000000000..5c51f66d90 --- /dev/null +++ b/web/ui/mantine-ui/public/prometheus-logo.svg @@ -0,0 +1,50 @@ + + + +image/svg+xml \ No newline at end of file diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx new file mode 100644 index 0000000000..00e16b11b8 --- /dev/null +++ b/web/ui/mantine-ui/src/App.tsx @@ -0,0 +1,9 @@ +import "@mantine/core/styles.css"; + +import { MantineProvider } from "@mantine/core"; + +function App() { + return ; +} + +export default App; diff --git a/web/ui/mantine-ui/src/index.css b/web/ui/mantine-ui/src/index.css new file mode 100644 index 0000000000..6119ad9a8f --- /dev/null +++ 
b/web/ui/mantine-ui/src/index.css @@ -0,0 +1,68 @@ +:root { + font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; + line-height: 1.5; + font-weight: 400; + + color-scheme: light dark; + color: rgba(255, 255, 255, 0.87); + background-color: #242424; + + font-synthesis: none; + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +a { + font-weight: 500; + color: #646cff; + text-decoration: inherit; +} +a:hover { + color: #535bf2; +} + +body { + margin: 0; + display: flex; + place-items: center; + min-width: 320px; + min-height: 100vh; +} + +h1 { + font-size: 3.2em; + line-height: 1.1; +} + +button { + border-radius: 8px; + border: 1px solid transparent; + padding: 0.6em 1.2em; + font-size: 1em; + font-weight: 500; + font-family: inherit; + background-color: #1a1a1a; + cursor: pointer; + transition: border-color 0.25s; +} +button:hover { + border-color: #646cff; +} +button:focus, +button:focus-visible { + outline: 4px auto -webkit-focus-ring-color; +} + +@media (prefers-color-scheme: light) { + :root { + color: #213547; + background-color: #ffffff; + } + a:hover { + color: #747bff; + } + button { + background-color: #f9f9f9; + } +} diff --git a/web/ui/mantine-ui/src/main.tsx b/web/ui/mantine-ui/src/main.tsx new file mode 100644 index 0000000000..3d7150da80 --- /dev/null +++ b/web/ui/mantine-ui/src/main.tsx @@ -0,0 +1,10 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App.tsx' +import './index.css' + +ReactDOM.createRoot(document.getElementById('root')!).render( + + + , +) diff --git a/web/ui/mantine-ui/src/vite-env.d.ts b/web/ui/mantine-ui/src/vite-env.d.ts new file mode 100644 index 0000000000..11f02fe2a0 --- /dev/null +++ b/web/ui/mantine-ui/src/vite-env.d.ts @@ -0,0 +1 @@ +/// diff --git a/web/ui/mantine-ui/tsconfig.json b/web/ui/mantine-ui/tsconfig.json new file mode 100644 index 0000000000..a7fc6fbf23 --- /dev/null +++ 
b/web/ui/mantine-ui/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} diff --git a/web/ui/mantine-ui/tsconfig.node.json b/web/ui/mantine-ui/tsconfig.node.json new file mode 100644 index 0000000000..97ede7ee6f --- /dev/null +++ b/web/ui/mantine-ui/tsconfig.node.json @@ -0,0 +1,11 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true, + "strict": true + }, + "include": ["vite.config.ts"] +} diff --git a/web/ui/mantine-ui/vite.config.ts b/web/ui/mantine-ui/vite.config.ts new file mode 100644 index 0000000000..5a33944a9b --- /dev/null +++ b/web/ui/mantine-ui/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [react()], +}) From d6a347e5ded303445331e32f0636379fe255da7d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 21 Feb 2024 11:13:48 +0100 Subject: [PATCH 0008/1397] Implement several status pages and other general aspects Signed-off-by: Julius Volz --- web/ui/mantine-ui/index.html | 24 +- web/ui/mantine-ui/package.json | 18 +- web/ui/mantine-ui/src/App.module.css | 32 ++ web/ui/mantine-ui/src/App.tsx | 300 +++++++++++++++++- web/ui/mantine-ui/src/api/api.ts | 32 ++ .../src/api/response-types/rules.ts | 51 +++ .../src/api/response-types/tsdb-status.ts | 20 ++ 
web/ui/mantine-ui/src/codebox.module.css | 45 +++ web/ui/mantine-ui/src/codemirror/theme.ts | 295 +++++++++++++++++ web/ui/mantine-ui/src/error-boundary.tsx | 52 +++ .../mantine-ui/src/images/prometheus-logo.svg | 19 ++ web/ui/mantine-ui/src/lib/time-format.ts | 118 +++++++ web/ui/mantine-ui/src/main.tsx | 35 +- web/ui/mantine-ui/src/pages/agent.tsx | 27 ++ web/ui/mantine-ui/src/pages/alerts.tsx | 3 + web/ui/mantine-ui/src/pages/config.tsx | 16 + web/ui/mantine-ui/src/pages/flags.module.css | 21 ++ web/ui/mantine-ui/src/pages/flags.tsx | 183 +++++++++++ web/ui/mantine-ui/src/pages/graph.tsx | 3 + web/ui/mantine-ui/src/pages/rules.tsx | 257 +++++++++++++++ .../src/pages/service-discovery.tsx | 3 + web/ui/mantine-ui/src/pages/status.tsx | 82 +++++ web/ui/mantine-ui/src/pages/targets.tsx | 3 + web/ui/mantine-ui/src/pages/tsdb-status.tsx | 101 ++++++ web/ui/mantine-ui/src/settings.ts | 13 + web/ui/mantine-ui/src/theme-selector.tsx | 67 ++++ web/ui/mantine-ui/vite.config.ts | 13 +- 27 files changed, 1818 insertions(+), 15 deletions(-) create mode 100644 web/ui/mantine-ui/src/App.module.css create mode 100644 web/ui/mantine-ui/src/api/api.ts create mode 100644 web/ui/mantine-ui/src/api/response-types/rules.ts create mode 100644 web/ui/mantine-ui/src/api/response-types/tsdb-status.ts create mode 100644 web/ui/mantine-ui/src/codebox.module.css create mode 100644 web/ui/mantine-ui/src/codemirror/theme.ts create mode 100644 web/ui/mantine-ui/src/error-boundary.tsx create mode 100644 web/ui/mantine-ui/src/images/prometheus-logo.svg create mode 100644 web/ui/mantine-ui/src/lib/time-format.ts create mode 100644 web/ui/mantine-ui/src/pages/agent.tsx create mode 100644 web/ui/mantine-ui/src/pages/alerts.tsx create mode 100644 web/ui/mantine-ui/src/pages/config.tsx create mode 100644 web/ui/mantine-ui/src/pages/flags.module.css create mode 100644 web/ui/mantine-ui/src/pages/flags.tsx create mode 100644 web/ui/mantine-ui/src/pages/graph.tsx create mode 100644 
web/ui/mantine-ui/src/pages/rules.tsx create mode 100644 web/ui/mantine-ui/src/pages/service-discovery.tsx create mode 100644 web/ui/mantine-ui/src/pages/status.tsx create mode 100644 web/ui/mantine-ui/src/pages/targets.tsx create mode 100644 web/ui/mantine-ui/src/pages/tsdb-status.tsx create mode 100644 web/ui/mantine-ui/src/settings.ts create mode 100644 web/ui/mantine-ui/src/theme-selector.tsx diff --git a/web/ui/mantine-ui/index.html b/web/ui/mantine-ui/index.html index 83b4f801ea..f54cf951c0 100644 --- a/web/ui/mantine-ui/index.html +++ b/web/ui/mantine-ui/index.html @@ -4,7 +4,29 @@ - Prometheus Server + + + + + + TITLE_PLACEHOLDER
diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index fe4d234498..221a6c36bd 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -10,12 +10,28 @@ "preview": "vite preview" }, "dependencies": { + "@codemirror/autocomplete": "^6.12.0", + "@codemirror/language": "^6.10.1", + "@codemirror/lint": "^6.5.0", + "@codemirror/state": "^6.4.0", + "@codemirror/view": "^6.24.0", + "@lezer/common": "^1.2.1", + "@lezer/highlight": "^1.2.0", + "@mantine/code-highlight": "^7.5.3", "@mantine/core": "^7.5.3", + "@mantine/dates": "^7.5.3", "@mantine/hooks": "^7.5.3", + "@prometheus-io/codemirror-promql": "^0.50.0-rc.1", + "@tabler/icons-react": "^2.47.0", + "@tanstack/react-query": "^5.22.2", + "@uiw/react-codemirror": "^4.21.22", + "dayjs": "^1.11.10", "react": "^18.2.0", - "react-dom": "^18.2.0" + "react-dom": "^18.2.0", + "react-router-dom": "^6.22.1" }, "devDependencies": { + "@types/eslint": "~8.56.2", "@types/react": "^18.2.55", "@types/react-dom": "^18.2.19", "@typescript-eslint/eslint-plugin": "^6.21.0", diff --git a/web/ui/mantine-ui/src/App.module.css b/web/ui/mantine-ui/src/App.module.css new file mode 100644 index 0000000000..82c11a4ff2 --- /dev/null +++ b/web/ui/mantine-ui/src/App.module.css @@ -0,0 +1,32 @@ +.control { + display: block; + padding: var(--mantine-spacing-xs) var(--mantine-spacing-md); + border-radius: var(--mantine-radius-md); + font-weight: 500; + + @mixin hover { + background-color: var(--mantine-color-gray-8); + } +} + +.link { + display: block; + line-height: 1; + padding: rem(8px) rem(12px); + border-radius: var(--mantine-radius-sm); + text-decoration: none; + color: var(--mantine-color-gray-0); + font-size: var(--mantine-font-size-sm); + font-weight: 500; + background-color: transparent; + + @mixin hover { + background-color: var(--mantine-color-gray-6); + color: var(--mantine-color-gray-0); + } + + [data-mantine-color-scheme] &[aria-current="page"] { + background-color: 
var(--mantine-color-blue-filled); + color: var(--mantine-color-white); + } +} diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 00e16b11b8..4c7732bb53 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -1,9 +1,305 @@ import "@mantine/core/styles.css"; +import "@mantine/code-highlight/styles.css"; +import classes from "./App.module.css"; +import PrometheusLogo from "./images/prometheus-logo.svg"; -import { MantineProvider } from "@mantine/core"; +import { + AppShell, + Burger, + Button, + Group, + MantineProvider, + Menu, + Skeleton, + Text, + createTheme, + rem, +} from "@mantine/core"; +import { useDisclosure } from "@mantine/hooks"; +import { + IconBellFilled, + IconChartAreaFilled, + IconChevronDown, + IconChevronRight, + IconCloudDataConnection, + IconDatabase, + IconFileAnalytics, + IconFlag, + IconHeartRateMonitor, + IconHelp, + IconInfoCircle, + IconServerCog, +} from "@tabler/icons-react"; +import { + BrowserRouter, + NavLink, + Navigate, + Route, + Routes, +} from "react-router-dom"; +import Graph from "./pages/graph"; +import Alerts from "./pages/alerts"; +import { IconTable } from "@tabler/icons-react"; +import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; +// import { ReactQueryDevtools } from "react-query/devtools"; +import Rules from "./pages/rules"; +import Targets from "./pages/targets"; +import ServiceDiscovery from "./pages/service-discovery"; +import Status from "./pages/status"; +import TSDBStatus from "./pages/tsdb-status"; +import Flags from "./pages/flags"; +import Config from "./pages/config"; +import { Suspense, useContext } from "react"; +import ErrorBoundary from "./error-boundary"; +import { ThemeSelector } from "./theme-selector"; +import { SettingsContext } from "./settings"; +import Agent from "./pages/agent"; + +const queryClient = new QueryClient(); + +const monitoringStatusPages = [ + { + title: "Targets", + path: "/targets", + icon: , + element: , + 
}, + { + title: "Rules", + path: "/rules", + icon: , + element: , + }, + { + title: "Service discovery", + path: "/service-discovery", + icon: ( + + ), + element: , + }, +]; + +const serverStatusPages = [ + { + title: "Runtime & build information", + path: "/status", + icon: , + element: , + }, + { + title: "TSDB status", + path: "/tsdb-status", + icon: , + element: , + }, + { + title: "Command-line flags", + path: "/flags", + icon: , + element: , + }, + { + title: "Configuration", + path: "/config", + icon: , + element: , + }, +]; + +const allStatusPages = [...monitoringStatusPages, ...serverStatusPages]; + +const theme = createTheme({ + colors: { + "codebox-bg": [ + "#f5f5f5", + "#e7e7e7", + "#cdcdcd", + "#b2b2b2", + "#9a9a9a", + "#8b8b8b", + "#848484", + "#717171", + "#656565", + "#575757", + ], + }, +}); function App() { - return ; + const [opened, { toggle }] = useDisclosure(); + const { agentMode } = useContext(SettingsContext); + + const navLinks = ( + <> + + + + + + {allStatusPages.map((p) => ( + + + + } + /> + ))} + + + + } + /> + + + + Monitoring status + {monitoringStatusPages.map((p) => ( + + {p.title} + + ))} + + + Server status + {serverStatusPages.map((p) => ( + + {p.title} + + ))} + + + + + + ); + + return ( + + + + + + + + + + Prometheus{agentMode && " Agent"} + + + {navLinks} + + + + {} + + + + + {navLinks} + + + + + ( + + ))} + > + + + } + /> + } /> + } /> + } /> + {allStatusPages.map((p) => ( + + ))} + + + + + + {/* */} + + + + ); } export default App; diff --git a/web/ui/mantine-ui/src/api/api.ts b/web/ui/mantine-ui/src/api/api.ts new file mode 100644 index 0000000000..48576ee90f --- /dev/null +++ b/web/ui/mantine-ui/src/api/api.ts @@ -0,0 +1,32 @@ +import { useSuspenseQuery } from "@tanstack/react-query"; + +export const API_PATH = "api/v1"; + +export type APIResponse = { status: string; data: T }; + +export const useSuspenseAPIQuery = (path: string) => + useSuspenseQuery<{ data: T }>({ + queryKey: [path], + retry: false, + 
refetchOnWindowFocus: false, + gcTime: 0, + queryFn: () => + fetch(`/${API_PATH}/${path}`, { + cache: "no-store", + credentials: "same-origin", + }) + // Introduce 3 seconds delay to simulate slow network. + // .then( + // (res) => + // new Promise((resolve) => + // setTimeout(() => resolve(res), 2000) + // ) + // ) + .then((res) => { + if (!res.ok) { + throw new Error(res.statusText); + } + return res; + }) + .then((res) => res.json() as Promise>), + }); diff --git a/web/ui/mantine-ui/src/api/response-types/rules.ts b/web/ui/mantine-ui/src/api/response-types/rules.ts new file mode 100644 index 0000000000..eba15a6169 --- /dev/null +++ b/web/ui/mantine-ui/src/api/response-types/rules.ts @@ -0,0 +1,51 @@ +type RuleState = "pending" | "firing" | "inactive"; + +export interface Alert { + labels: Record; + state: RuleState; + value: string; + annotations: Record; + activeAt: string; + keepFiringSince: string; +} + +type CommonRuleFields = { + name: string; + query: string; + evaluationTime: string; + health: string; + lastError?: string; + lastEvaluation: string; +}; + +type AlertingRule = { + type: "alerting"; + // For alerting rules, the 'labels' field is always present, even when there are no labels. + labels: Record; + annotations: Record; + duration: number; + keepFiringFor: number; + state: RuleState; + alerts: Alert[]; +} & CommonRuleFields; + +type RecordingRule = { + type: "recording"; + // For recording rules, the 'labels' field is only present when there are labels. 
+ labels?: Record; +} & CommonRuleFields; + +export type Rule = AlertingRule | RecordingRule; + +interface RuleGroup { + name: string; + file: string; + interval: string; + rules: Rule[]; + evaluationTime: string; + lastEvaluation: string; +} + +export interface RulesMap { + groups: RuleGroup[]; +} diff --git a/web/ui/mantine-ui/src/api/response-types/tsdb-status.ts b/web/ui/mantine-ui/src/api/response-types/tsdb-status.ts new file mode 100644 index 0000000000..98b93177ce --- /dev/null +++ b/web/ui/mantine-ui/src/api/response-types/tsdb-status.ts @@ -0,0 +1,20 @@ +interface Stats { + name: string; + value: number; +} + +interface HeadStats { + numSeries: number; + numLabelPairs: number; + chunkCount: number; + minTime: number; + maxTime: number; +} + +export interface TSDBMap { + headStats: HeadStats; + seriesCountByMetricName: Stats[]; + labelValueCountByLabelName: Stats[]; + memoryInBytesByLabelName: Stats[]; + seriesCountByLabelValuePair: Stats[]; +} diff --git a/web/ui/mantine-ui/src/codebox.module.css b/web/ui/mantine-ui/src/codebox.module.css new file mode 100644 index 0000000000..23387c7764 --- /dev/null +++ b/web/ui/mantine-ui/src/codebox.module.css @@ -0,0 +1,45 @@ +.codebox { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); +} + +.statsBadge { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); + color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5)); +} + +.labelBadge { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); + color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5)); +} + +.healthOk { + background-color: light-dark( + var(--mantine-color-green-1), + var(--mantine-color-green-9) + ); + color: light-dark(var(--mantine-color-green-9), var(--mantine-color-green-1)); +} + +.healthErr { + background-color: light-dark( + var(--mantine-color-red-1), + 
var(--mantine-color-red-9) + ); + color: light-dark(var(--mantine-color-red-9), var(--mantine-color-red-1)); +} + +.healthUnknown { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); +} diff --git a/web/ui/mantine-ui/src/codemirror/theme.ts b/web/ui/mantine-ui/src/codemirror/theme.ts new file mode 100644 index 0000000000..9028b527c8 --- /dev/null +++ b/web/ui/mantine-ui/src/codemirror/theme.ts @@ -0,0 +1,295 @@ +import { HighlightStyle } from "@codemirror/language"; +import { EditorView } from "@codemirror/view"; +import { tags } from "@lezer/highlight"; + +export const baseTheme = EditorView.theme({ + "&.cm-editor": { + "&.cm-focused": { + outline: "none", + outline_fallback: "none", + }, + backgroundColor: "transparent", + }, + ".cm-scroller": { + overflow: "hidden", + fontFamily: '"DejaVu Sans Mono", monospace', + }, + ".cm-placeholder": { + fontFamily: + '-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans","Liberation Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji"', + }, + + ".cm-matchingBracket": { + fontWeight: "bold", + outline: "1px dashed transparent", + }, + ".cm-nonmatchingBracket": { borderColor: "red" }, + + ".cm-tooltip.cm-tooltip-autocomplete": { + "& > ul": { + maxHeight: "350px", + fontFamily: '"DejaVu Sans Mono", monospace', + maxWidth: "unset", + }, + "& > ul > li": { + padding: "2px 1em 2px 3px", + }, + minWidth: "30%", + }, + + ".cm-completionDetail": { + float: "right", + color: "#999", + }, + + ".cm-tooltip.cm-completionInfo": { + marginTop: "-11px", + padding: "10px", + fontFamily: + "'Open Sans', 'Lucida Sans Unicode', 'Lucida Grande', sans-serif;", + border: "none", + minWidth: "250px", + maxWidth: "min-content", + }, + + ".cm-completionInfo.cm-completionInfo-right": { + "&:before": { + content: "' '", + height: "0", + position: "absolute", + width: "0", + left: "-20px", + borderWidth: "10px", + borderStyle: "solid", 
+ borderColor: "transparent", + }, + marginLeft: "12px", + }, + ".cm-completionInfo.cm-completionInfo-left": { + "&:before": { + content: "' '", + height: "0", + position: "absolute", + width: "0", + right: "-20px", + borderWidth: "10px", + borderStyle: "solid", + borderColor: "transparent", + }, + marginRight: "12px", + }, + + ".cm-completionMatchedText": { + textDecoration: "none", + fontWeight: "bold", + }, + + ".cm-selectionMatch": { + backgroundColor: "#e6f3ff", + }, + + ".cm-diagnostic": { + "&.cm-diagnostic-error": { + borderLeft: "3px solid #e65013", + }, + }, + + ".cm-completionIcon": { + boxSizing: "content-box", + fontSize: "16px", + lineHeight: "1", + marginRight: "10px", + verticalAlign: "top", + "&:after": { content: "'\\ea88'" }, + fontFamily: "codicon", + paddingRight: "0", + opacity: "1", + }, + + ".cm-completionIcon-function, .cm-completionIcon-method": { + "&:after": { content: "'\\ea8c'" }, + }, + ".cm-completionIcon-class": { + "&:after": { content: "'○'" }, + }, + ".cm-completionIcon-interface": { + "&:after": { content: "'◌'" }, + }, + ".cm-completionIcon-variable": { + "&:after": { content: "'𝑥'" }, + }, + ".cm-completionIcon-constant": { + "&:after": { content: "'\\eb5f'" }, + }, + ".cm-completionIcon-type": { + "&:after": { content: "'𝑡'" }, + }, + ".cm-completionIcon-enum": { + "&:after": { content: "'∪'" }, + }, + ".cm-completionIcon-property": { + "&:after": { content: "'□'" }, + }, + ".cm-completionIcon-keyword": { + "&:after": { content: "'\\eb62'" }, + }, + ".cm-completionIcon-namespace": { + "&:after": { content: "'▢'" }, + }, + ".cm-completionIcon-text": { + "&:after": { content: "'\\ea95'" }, + color: "#ee9d28", + }, +}); + +export const lightTheme = EditorView.theme( + { + ".cm-tooltip": { + backgroundColor: "#f8f8f8", + borderColor: "rgba(52, 79, 113, 0.2)", + }, + + ".cm-tooltip.cm-tooltip-autocomplete": { + "& li:hover": { + backgroundColor: "#ddd", + }, + "& > ul > li[aria-selected]": { + backgroundColor: "#d6ebff", + color: 
"unset", + }, + }, + + ".cm-tooltip.cm-completionInfo": { + backgroundColor: "#d6ebff", + }, + + ".cm-tooltip > .cm-completionInfo.cm-completionInfo-right": { + "&:before": { + borderRightColor: "#d6ebff", + }, + }, + ".cm-tooltip > .cm-completionInfo.cm-completionInfo-left": { + "&:before": { + borderLeftColor: "#d6ebff", + }, + }, + + ".cm-line": { + "&::selection": { + backgroundColor: "#add6ff", + }, + "& > span::selection": { + backgroundColor: "#add6ff", + }, + }, + + ".cm-matchingBracket": { + color: "#000", + backgroundColor: "#dedede", + }, + + ".cm-completionMatchedText": { + color: "#0066bf", + }, + + ".cm-completionIcon": { + color: "#007acc", + }, + + ".cm-completionIcon-constant": { + color: "#007acc", + }, + + ".cm-completionIcon-function, .cm-completionIcon-method": { + color: "#652d90", + }, + + ".cm-completionIcon-keyword": { + color: "#616161", + }, + }, + { dark: false } +); + +export const darkTheme = EditorView.theme( + { + ".cm-content": { + caretColor: "#fff", + }, + + ".cm-tooltip.cm-completionInfo": { + backgroundColor: "#333338", + }, + + ".cm-tooltip > .cm-completionInfo.cm-completionInfo-right": { + "&:before": { + borderRightColor: "#333338", + }, + }, + ".cm-tooltip > .cm-completionInfo.cm-completionInfo-left": { + "&:before": { + borderLeftColor: "#333338", + }, + }, + + ".cm-line": { + "&::selection": { + backgroundColor: "#767676", + }, + "& > span::selection": { + backgroundColor: "#767676", + }, + }, + + ".cm-matchingBracket, &.cm-focused .cm-matchingBracket": { + backgroundColor: "#616161", + }, + + ".cm-completionMatchedText": { + color: "#7dd3fc", + }, + + ".cm-completionIcon, .cm-completionIcon-constant": { + color: "#7dd3fc", + }, + + ".cm-completionIcon-function, .cm-completionIcon-method": { + color: "#d8b4fe", + }, + + ".cm-completionIcon-keyword": { + color: "#cbd5e1 !important", + }, + }, + { dark: true } +); + +export const promqlHighlighter = HighlightStyle.define([ + { tag: tags.number, color: "#09885a" }, + { tag: 
tags.string, color: "#a31515" }, + { tag: tags.keyword, color: "#008080" }, + { tag: tags.function(tags.variableName), color: "#008080" }, + { tag: tags.labelName, color: "#800000" }, + { tag: tags.operator }, + { tag: tags.modifier, color: "#008080" }, + { tag: tags.paren }, + { tag: tags.squareBracket }, + { tag: tags.brace }, + { tag: tags.invalid, color: "red" }, + { tag: tags.comment, color: "#888", fontStyle: "italic" }, +]); + +export const darkPromqlHighlighter = HighlightStyle.define([ + { tag: tags.number, color: "#22c55e" }, + { tag: tags.string, color: "#fca5a5" }, + { tag: tags.keyword, color: "#14bfad" }, + { tag: tags.function(tags.variableName), color: "#14bfad" }, + { tag: tags.labelName, color: "#ff8585" }, + { tag: tags.operator }, + { tag: tags.modifier, color: "#14bfad" }, + { tag: tags.paren }, + { tag: tags.squareBracket }, + { tag: tags.brace }, + { tag: tags.invalid, color: "#ff3d3d" }, + { tag: tags.comment, color: "#9ca3af", fontStyle: "italic" }, +]); diff --git a/web/ui/mantine-ui/src/error-boundary.tsx b/web/ui/mantine-ui/src/error-boundary.tsx new file mode 100644 index 0000000000..2a6f824914 --- /dev/null +++ b/web/ui/mantine-ui/src/error-boundary.tsx @@ -0,0 +1,52 @@ +import { Alert } from "@mantine/core"; +import { IconAlertTriangle } from "@tabler/icons-react"; +import { Component, ErrorInfo, ReactNode } from "react"; +import { useLocation } from "react-router-dom"; + +interface Props { + children?: ReactNode; +} + +interface State { + error: Error | null; +} + +class ErrorBoundary extends Component { + public state: State = { + error: null, + }; + + public static getDerivedStateFromError(error: Error): State { + // Update state so the next render will show the fallback UI. 
+ return { error }; + } + + public componentDidCatch(error: Error, errorInfo: ErrorInfo) { + console.error("Uncaught error:", error, errorInfo); + } + + public render() { + if (this.state.error !== null) { + return ( + } + > + Error: {this.state.error.message} + + ); + } + + return this.props.children; + } +} + +const ResettingErrorBoundary = (props: Props) => { + const location = useLocation(); + return ( + {props.children} + ); +}; + +export default ResettingErrorBoundary; diff --git a/web/ui/mantine-ui/src/images/prometheus-logo.svg b/web/ui/mantine-ui/src/images/prometheus-logo.svg new file mode 100644 index 0000000000..b914095ecf --- /dev/null +++ b/web/ui/mantine-ui/src/images/prometheus-logo.svg @@ -0,0 +1,19 @@ + + + + + + + + + + diff --git a/web/ui/mantine-ui/src/lib/time-format.ts b/web/ui/mantine-ui/src/lib/time-format.ts new file mode 100644 index 0000000000..f31e574682 --- /dev/null +++ b/web/ui/mantine-ui/src/lib/time-format.ts @@ -0,0 +1,118 @@ +import dayjs from "dayjs"; +import duration from "dayjs/plugin/duration"; +dayjs.extend(duration); +import utc from "dayjs/plugin/utc"; +dayjs.extend(utc); + +export const parseDuration = (durationStr: string): number | null => { + if (durationStr === "") { + return null; + } + if (durationStr === "0") { + // Allow 0 without a unit. + return 0; + } + + const durationRE = new RegExp( + "^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$" + ); + const matches = durationStr.match(durationRE); + if (!matches) { + return null; + } + + let dur = 0; + + // Parse the match at pos `pos` in the regex and use `mult` to turn that + // into ms, then add that value to the total parsed duration. 
+ const m = (pos: number, mult: number) => { + if (matches[pos] === undefined) { + return; + } + const n = parseInt(matches[pos]); + dur += n * mult; + }; + + m(2, 1000 * 60 * 60 * 24 * 365); // y + m(4, 1000 * 60 * 60 * 24 * 7); // w + m(6, 1000 * 60 * 60 * 24); // d + m(8, 1000 * 60 * 60); // h + m(10, 1000 * 60); // m + m(12, 1000); // s + m(14, 1); // ms + + return dur; +}; + +export const formatDuration = (d: number): string => { + let ms = d; + let r = ""; + if (ms === 0) { + return "0s"; + } + + const f = (unit: string, mult: number, exact: boolean) => { + if (exact && ms % mult !== 0) { + return; + } + const v = Math.floor(ms / mult); + if (v > 0) { + r += `${v}${unit}`; + ms -= v * mult; + } + }; + + // Only format years and weeks if the remainder is zero, as it is often + // easier to read 90d than 12w6d. + f("y", 1000 * 60 * 60 * 24 * 365, true); + f("w", 1000 * 60 * 60 * 24 * 7, true); + + f("d", 1000 * 60 * 60 * 24, false); + f("h", 1000 * 60 * 60, false); + f("m", 1000 * 60, false); + f("s", 1000, false); + f("ms", 1, false); + + return r; +}; + +export function parseTime(timeText: string): number { + return dayjs.utc(timeText).valueOf(); +} + +export const now = (): number => dayjs().valueOf(); + +export const humanizeDuration = (milliseconds: number): string => { + const sign = milliseconds < 0 ? "-" : ""; + const unsignedMillis = milliseconds < 0 ? 
-1 * milliseconds : milliseconds; + const duration = dayjs.duration(unsignedMillis, "ms"); + const ms = Math.floor(duration.milliseconds()); + const s = Math.floor(duration.seconds()); + const m = Math.floor(duration.minutes()); + const h = Math.floor(duration.hours()); + const d = Math.floor(duration.asDays()); + if (d !== 0) { + return `${sign}${d}d ${h}h ${m}m ${s}s`; + } + if (h !== 0) { + return `${sign}${h}h ${m}m ${s}s`; + } + if (m !== 0) { + return `${sign}${m}m ${s}s`; + } + if (s !== 0) { + return `${sign}${s}.${ms}s`; + } + if (unsignedMillis > 0) { + return `${sign}${unsignedMillis.toFixed(3)}ms`; + } + return "0s"; +}; + +export const formatRelative = (startStr: string, end: number): string => { + const start = parseTime(startStr); + if (start < 0) { + return "Never"; + } + return humanizeDuration(end - start) + " ago"; +}; diff --git a/web/ui/mantine-ui/src/main.tsx b/web/ui/mantine-ui/src/main.tsx index 3d7150da80..8e3f4e82d6 100644 --- a/web/ui/mantine-ui/src/main.tsx +++ b/web/ui/mantine-ui/src/main.tsx @@ -1,10 +1,29 @@ -import React from 'react' -import ReactDOM from 'react-dom/client' -import App from './App.tsx' -import './index.css' +import React from "react"; +import ReactDOM from "react-dom/client"; +import App from "./App.tsx"; +import "./index.css"; +import { Settings, SettingsContext } from "./settings.ts"; -ReactDOM.createRoot(document.getElementById('root')!).render( +// Declared/defined in public/index.html, value replaced by Prometheus when serving bundle. +declare const GLOBAL_CONSOLES_LINK: string; +declare const GLOBAL_AGENT_MODE: string; +declare const GLOBAL_READY: string; + +const settings: Settings = { + consolesLink: + GLOBAL_CONSOLES_LINK === "CONSOLES_LINK_PLACEHOLDER" || + GLOBAL_CONSOLES_LINK === "" || + GLOBAL_CONSOLES_LINK === null + ? 
null + : GLOBAL_CONSOLES_LINK, + agentMode: GLOBAL_AGENT_MODE === "true", + ready: GLOBAL_READY === "true", +}; + +ReactDOM.createRoot(document.getElementById("root")!).render( - - , -) + + + + +); diff --git a/web/ui/mantine-ui/src/pages/agent.tsx b/web/ui/mantine-ui/src/pages/agent.tsx new file mode 100644 index 0000000000..40ff3817e3 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/agent.tsx @@ -0,0 +1,27 @@ +import { Card, Group, Text } from "@mantine/core"; +import { IconSpy } from "@tabler/icons-react"; +import { FC } from "react"; + +const Agent: FC = () => { + return ( + + + + + Prometheus Agent + + + + This Prometheus instance is running in agent mode. In + this mode, Prometheus is only used to scrape discovered targets and + forward the scraped metrics to remote write endpoints. + + + Some features are not available in this mode, such as querying and + alerting. + + + ); +}; + +export default Agent; diff --git a/web/ui/mantine-ui/src/pages/alerts.tsx b/web/ui/mantine-ui/src/pages/alerts.tsx new file mode 100644 index 0000000000..e5514d254b --- /dev/null +++ b/web/ui/mantine-ui/src/pages/alerts.tsx @@ -0,0 +1,3 @@ +export default function Alerts() { + return <>Alerts page; +} diff --git a/web/ui/mantine-ui/src/pages/config.tsx b/web/ui/mantine-ui/src/pages/config.tsx new file mode 100644 index 0000000000..3cbace38c0 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/config.tsx @@ -0,0 +1,16 @@ +import { CodeHighlight } from "@mantine/code-highlight"; +import { useSuspenseQuery } from "@tanstack/react-query"; + +export default function Config() { + const { + data: { + data: { yaml }, + }, + } = useSuspenseQuery<{ data: { yaml: string } }>({ + queryKey: ["config"], + queryFn: () => { + return fetch("/api/v1/status/config").then((res) => res.json()); + }, + }); + return ; +} diff --git a/web/ui/mantine-ui/src/pages/flags.module.css b/web/ui/mantine-ui/src/pages/flags.module.css new file mode 100644 index 0000000000..221b2de028 --- /dev/null +++ 
b/web/ui/mantine-ui/src/pages/flags.module.css @@ -0,0 +1,21 @@ +.th { + padding: 0; +} + +.control { + width: 100%; + padding: var(--mantine-spacing-xs) var(--mantine-spacing-md); + + @mixin hover { + background-color: light-dark( + var(--mantine-color-gray-0), + var(--mantine-color-dark-6) + ); + } +} + +.icon { + width: rem(21px); + height: rem(21px); + border-radius: rem(21px); +} diff --git a/web/ui/mantine-ui/src/pages/flags.tsx b/web/ui/mantine-ui/src/pages/flags.tsx new file mode 100644 index 0000000000..af4203e6dd --- /dev/null +++ b/web/ui/mantine-ui/src/pages/flags.tsx @@ -0,0 +1,183 @@ +import { useState } from "react"; +import { + Table, + UnstyledButton, + Group, + Text, + Center, + TextInput, + rem, + keys, + Card, +} from "@mantine/core"; +import { + IconSelector, + IconChevronDown, + IconChevronUp, + IconSearch, +} from "@tabler/icons-react"; +import classes from "./flags.module.css"; +import { useSuspenseAPIQuery } from "../api/api"; + +interface RowData { + flag: string; + value: string; +} + +interface ThProps { + children: React.ReactNode; + reversed: boolean; + sorted: boolean; + onSort(): void; +} + +function Th({ children, reversed, sorted, onSort }: ThProps) { + const Icon = sorted + ? reversed + ? IconChevronUp + : IconChevronDown + : IconSelector; + return ( + + + + + {children} + +
+ +
+
+
+
+ ); +} + +function filterData(data: RowData[], search: string) { + const query = search.toLowerCase().trim(); + return data.filter((item) => + keys(data[0]).some((key) => item[key].toLowerCase().includes(query)) + ); +} + +function sortData( + data: RowData[], + payload: { sortBy: keyof RowData | null; reversed: boolean; search: string } +) { + const { sortBy } = payload; + + if (!sortBy) { + return filterData(data, payload.search); + } + + return filterData( + [...data].sort((a, b) => { + if (payload.reversed) { + return b[sortBy].localeCompare(a[sortBy]); + } + + return a[sortBy].localeCompare(b[sortBy]); + }), + payload.search + ); +} + +export default function Flags() { + const { data } = useSuspenseAPIQuery>(`/status/flags`); + + // const { response, error, isLoading } = + // useFetchAPI>(`/status/flags`); + + const flags = Object.entries(data.data).map(([flag, value]) => ({ + flag, + value, + })); + + const [search, setSearch] = useState(""); + const [sortedData, setSortedData] = useState(flags); + const [sortBy, setSortBy] = useState(null); + const [reverseSortDirection, setReverseSortDirection] = useState(false); + + const setSorting = (field: keyof RowData) => { + const reversed = field === sortBy ? !reverseSortDirection : false; + setReverseSortDirection(reversed); + setSortBy(field); + setSortedData(sortData(flags, { sortBy: field, reversed, search })); + }; + + const handleSearchChange = (event: React.ChangeEvent) => { + const { value } = event.currentTarget; + setSearch(value); + setSortedData( + sortData(flags, { sortBy, reversed: reverseSortDirection, search: value }) + ); + }; + + const rows = sortedData.map((row) => ( + + + --{row.flag} + + + {row.value} + + + )); + + return ( + + + } + value={search} + onChange={handleSearchChange} + /> + + + + + + + + + + {rows.length > 0 ? ( + rows + ) : ( + + + + Nothing found + + + + )} + +
setSorting("flag")} + > + Flag + setSorting("value")} + > + Value +
+
+ ); +} diff --git a/web/ui/mantine-ui/src/pages/graph.tsx b/web/ui/mantine-ui/src/pages/graph.tsx new file mode 100644 index 0000000000..1e1c158139 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/graph.tsx @@ -0,0 +1,3 @@ +export default function Graph() { + return <>Graph page; +} diff --git a/web/ui/mantine-ui/src/pages/rules.tsx b/web/ui/mantine-ui/src/pages/rules.tsx new file mode 100644 index 0000000000..ab1e8a5075 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/rules.tsx @@ -0,0 +1,257 @@ +import { + Alert, + Badge, + Card, + Group, + Table, + Text, + Tooltip, + useComputedColorScheme, +} from "@mantine/core"; +// import { useQuery } from "react-query"; +import { + formatDuration, + formatRelative, + humanizeDuration, + now, +} from "../lib/time-format"; +import { + IconAlertTriangle, + IconBell, + IconClockPause, + IconClockPlay, + IconDatabaseImport, + IconHourglass, + IconRefresh, + IconRepeat, +} from "@tabler/icons-react"; +import CodeMirror, { EditorView } from "@uiw/react-codemirror"; +import { useSuspenseAPIQuery } from "../api/api"; +import { RulesMap } from "../api/response-types/rules"; +import { syntaxHighlighting } from "@codemirror/language"; +import { + baseTheme, + darkPromqlHighlighter, + lightTheme, + promqlHighlighter, +} from "../codemirror/theme"; +import { PromQLExtension } from "@prometheus-io/codemirror-promql"; +import classes from "../codebox.module.css"; + +const healthBadgeClass = (state: string) => { + switch (state) { + case "ok": + return classes.healthOk; + case "err": + return classes.healthErr; + case "unknown": + return classes.healthUnknown; + default: + return "orange"; + } +}; + +const promqlExtension = new PromQLExtension(); + +export default function Rules() { + const { data } = useSuspenseAPIQuery(`/rules`); + const theme = useComputedColorScheme(); + + return ( + <> + {data.data.groups.map((g) => ( + + + + + {g.name} + + + {g.file} + + + + + } + > + {formatRelative(g.lastEvaluation, now())} + + + + } + > + 
{humanizeDuration(parseFloat(g.evaluationTime) * 1000)} + + + + } + > + {humanizeDuration(parseFloat(g.interval) * 1000)} + + + + + + + {g.rules.map((r) => ( + + + + {r.type === "alerting" ? ( + + ) : ( + + )} + + {r.name} + + + + + {r.health} + + + + + } + > + {formatRelative(r.lastEvaluation, now())} + + + + + } + > + {humanizeDuration( + parseFloat(r.evaluationTime) * 1000 + )} + + + + + + + + + + + {r.lastError && ( + } + > + Error: {r.lastError} + + )} + {r.type === "alerting" && ( + + {r.duration && ( + } + > + for: {formatDuration(r.duration * 1000)} + + )} + {r.keepFiringFor && ( + } + > + keep_firing_for: {formatDuration(r.duration * 1000)} + + )} + + )} + {r.labels && Object.keys(r.labels).length > 0 && ( + + {Object.entries(r.labels).map(([k, v]) => ( + + {k}: {v} + + ))} + + )} + {/* {Object.keys(r.annotations).length > 0 && ( + + {Object.entries(r.annotations).map(([k, v]) => ( + + {k}: {v} + + ))} + + )} */} + + + ))} + +
+
+ ))} + + ); +} diff --git a/web/ui/mantine-ui/src/pages/service-discovery.tsx b/web/ui/mantine-ui/src/pages/service-discovery.tsx new file mode 100644 index 0000000000..fff68c3a9e --- /dev/null +++ b/web/ui/mantine-ui/src/pages/service-discovery.tsx @@ -0,0 +1,3 @@ +export default function ServiceDiscovery() { + return <>ServiceDiscovery page; +} diff --git a/web/ui/mantine-ui/src/pages/status.tsx b/web/ui/mantine-ui/src/pages/status.tsx new file mode 100644 index 0000000000..327cc04204 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/status.tsx @@ -0,0 +1,82 @@ +import { Card, Group, Stack, Table, Text } from "@mantine/core"; +import { useSuspenseAPIQuery } from "../api/api"; +import { IconRun, IconWall } from "@tabler/icons-react"; + +const statusConfig: Record< + string, + { + title?: string; + formatValue?: (v: any) => string; + } +> = { + startTime: { + title: "Start time", + formatValue: (v: string) => new Date(v).toUTCString(), + }, + CWD: { title: "Working directory" }, + reloadConfigSuccess: { + title: "Configuration reload", + formatValue: (v: boolean) => (v ? "Successful" : "Unsuccessful"), + }, + lastConfigTime: { + title: "Last successful configuration reload", + formatValue: (v: string) => new Date(v).toUTCString(), + }, + corruptionCount: { title: "WAL corruptions" }, + goroutineCount: { title: "Goroutines" }, + storageRetention: { title: "Storage retention" }, +}; + +export default function Status() { + const { data: buildinfo } = + useSuspenseAPIQuery>(`/status/buildinfo`); + const { data: runtimeinfo } = + useSuspenseAPIQuery>(`/status/runtimeinfo`); + + return ( + + + + + + Build information + + + + + {Object.entries(buildinfo.data).map(([k, v]) => ( + + {k} + {v} + + ))} + +
+
+ + + + + Runtime information + + + + + {Object.entries(runtimeinfo.data).map(([k, v]) => { + const { title = k, formatValue = (val: string) => val } = + statusConfig[k] || {}; + return ( + + + {title} + + {formatValue(v)} + + ); + })} + +
+
+
+ ); +} diff --git a/web/ui/mantine-ui/src/pages/targets.tsx b/web/ui/mantine-ui/src/pages/targets.tsx new file mode 100644 index 0000000000..7668cbc6e9 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/targets.tsx @@ -0,0 +1,3 @@ +export default function Targets() { + return <>Targets page; +} diff --git a/web/ui/mantine-ui/src/pages/tsdb-status.tsx b/web/ui/mantine-ui/src/pages/tsdb-status.tsx new file mode 100644 index 0000000000..1bdbd03ecc --- /dev/null +++ b/web/ui/mantine-ui/src/pages/tsdb-status.tsx @@ -0,0 +1,101 @@ +import { Stack, Card, Group, Table, Text } from "@mantine/core"; +import { useSuspenseAPIQuery } from "../api/api"; +import { TSDBMap } from "../api/response-types/tsdb-status"; + +export default function TSDBStatus() { + const { + data: { + data: { + headStats, + labelValueCountByLabelName, + seriesCountByMetricName, + memoryInBytesByLabelName, + seriesCountByLabelValuePair, + }, + }, + } = useSuspenseAPIQuery(`/status/tsdb`); + + const unixToTime = (unix: number): string => { + try { + return `${new Date(unix).toISOString()} (${unix})`; + } catch { + if (numSeries === 0) { + return "No datapoints yet"; + } + return `Error parsing time (${unix})`; + } + }; + const { chunkCount, numSeries, numLabelPairs, minTime, maxTime } = headStats; + const stats = [ + { name: "Number of Series", value: numSeries }, + { name: "Number of Chunks", value: chunkCount }, + { name: "Number of Label Pairs", value: numLabelPairs }, + { name: "Current Min Time", value: `${unixToTime(minTime)}` }, + { name: "Current Max Time", value: `${unixToTime(maxTime)}` }, + ]; + + return ( + + {[ + { + title: "TSDB Head Status", + stats, + formatAsCode: false, + }, + { + title: "Top 10 label names with value count", + stats: labelValueCountByLabelName, + formatAsCode: true, + }, + { + title: "Top 10 series count by metric names", + stats: seriesCountByMetricName, + formatAsCode: true, + }, + { + title: "Top 10 label names with high memory usage", + unit: "Bytes", + stats: 
memoryInBytesByLabelName, + formatAsCode: true, + }, + { + title: "Top 10 series count by label value pairs", + stats: seriesCountByLabelValuePair, + formatAsCode: true, + }, + ].map(({ title, unit = "Count", stats, formatAsCode }) => ( + + + + {title} + + + + + + Name + {unit} + + + + {stats.map(({ name, value }) => { + return ( + + + {formatAsCode ? {name} : name} + + {value} + + ); + })} + +
+
+ ))} +
+ ); +} diff --git a/web/ui/mantine-ui/src/settings.ts b/web/ui/mantine-ui/src/settings.ts new file mode 100644 index 0000000000..1595870a73 --- /dev/null +++ b/web/ui/mantine-ui/src/settings.ts @@ -0,0 +1,13 @@ +import { createContext } from "react"; + +export interface Settings { + consolesLink: string | null; + agentMode: boolean; + ready: boolean; +} + +export const SettingsContext = createContext({ + consolesLink: null, + agentMode: false, + ready: false, +}); diff --git a/web/ui/mantine-ui/src/theme-selector.tsx b/web/ui/mantine-ui/src/theme-selector.tsx new file mode 100644 index 0000000000..f01aa91f7c --- /dev/null +++ b/web/ui/mantine-ui/src/theme-selector.tsx @@ -0,0 +1,67 @@ +import { + useMantineColorScheme, + Group, + SegmentedControl, + rem, + MantineColorScheme, + Tooltip, +} from "@mantine/core"; +import { + IconMoonFilled, + IconSunFilled, + IconUserFilled, +} from "@tabler/icons-react"; +import { FC } from "react"; + +export const ThemeSelector: FC = () => { + const { colorScheme, setColorScheme } = useMantineColorScheme(); + const iconProps = { + style: { width: rem(20), height: rem(20), display: "block" }, + stroke: 1.5, + }; + + return ( + + setColorScheme(v as MantineColorScheme)} + data={[ + { + value: "light", + label: ( + + + + ), + }, + { + value: "dark", + label: ( + + + + ), + }, + { + value: "auto", + label: ( + + + + ), + }, + ]} + /> + + ); +}; diff --git a/web/ui/mantine-ui/vite.config.ts b/web/ui/mantine-ui/vite.config.ts index 5a33944a9b..0ed5aa400d 100644 --- a/web/ui/mantine-ui/vite.config.ts +++ b/web/ui/mantine-ui/vite.config.ts @@ -1,7 +1,14 @@ -import { defineConfig } from 'vite' -import react from '@vitejs/plugin-react' +import { defineConfig } from "vite"; +import react from "@vitejs/plugin-react"; // https://vitejs.dev/config/ export default defineConfig({ plugins: [react()], -}) + server: { + proxy: { + "/api": { + target: "http://localhost:9090", + }, + }, + }, +}); From cc38c1426a874514367276098b1aa454a9cdcf19 Mon Sep 
17 00:00:00 2001 From: Julius Volz Date: Wed, 21 Feb 2024 11:14:35 +0100 Subject: [PATCH 0009/1397] Attempt to integrate Mantine UI into npm workspaces + Prometheus binary Signed-off-by: Julius Volz --- web/ui/build_ui.sh | 6 +- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 12071 ++++++------------------------- web/ui/package.json | 9 +- web/web.go | 4 +- 5 files changed, 2178 insertions(+), 9914 deletions(-) diff --git a/web/ui/build_ui.sh b/web/ui/build_ui.sh index 8761bfa1e3..f0b496efd7 100644 --- a/web/ui/build_ui.sh +++ b/web/ui/build_ui.sh @@ -30,10 +30,10 @@ function buildModule() { } function buildReactApp() { - echo "build react-app" - npm run build -w @prometheus-io/app + echo "build mantine-ui" + npm run build -w @prometheus-io/mantine-ui rm -rf ./static/react - mv ./react-app/build ./static/react + mv ./mantine-ui/dist ./static/react } for i in "$@"; do diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 221a6c36bd..d64d2b1e44 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,5 +1,5 @@ { - "name": "mantine-ui", + "name": "@prometheus-io/mantine-ui", "private": true, "version": "0.0.0", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5f1db6ce0a..91a85656e2 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -8,10 +8,11 @@ "name": "prometheus-io", "version": "0.49.1", "workspaces": [ - "react-app", + "mantine-ui", "module/*" ], "devDependencies": { + "@rollup/plugin-node-resolve": "^15.2.3", "@types/jest": "^29.5.11", "@types/node": "^20.10.4", "eslint-config-prettier": "^8.10.0", @@ -20,14 +21,496 @@ "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", - "react-scripts": "^5.0.1", + "rollup": "^4.12.0", "ts-jest": "^29.1.1", - "typescript": "^4.9.5" + "typescript": "^5.3.3" }, "engines": { "npm": ">=7.0.0" } }, + "mantine-ui": { + "name": "@prometheus-io/mantine-ui", + "version": 
"0.0.0", + "dependencies": { + "@codemirror/autocomplete": "^6.12.0", + "@codemirror/language": "^6.10.1", + "@codemirror/lint": "^6.5.0", + "@codemirror/state": "^6.4.0", + "@codemirror/view": "^6.24.0", + "@lezer/common": "^1.2.1", + "@lezer/highlight": "^1.2.0", + "@mantine/code-highlight": "^7.5.3", + "@mantine/core": "^7.5.3", + "@mantine/dates": "^7.5.3", + "@mantine/hooks": "^7.5.3", + "@prometheus-io/codemirror-promql": "^0.50.0-rc.1", + "@tabler/icons-react": "^2.47.0", + "@tanstack/react-query": "^5.22.2", + "@uiw/react-codemirror": "^4.21.22", + "dayjs": "^1.11.10", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-router-dom": "^6.22.1" + }, + "devDependencies": { + "@types/eslint": "~8.56.2", + "@types/react": "^18.2.55", + "@types/react-dom": "^18.2.19", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "@vitejs/plugin-react": "^4.2.1", + "eslint": "^8.56.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "postcss": "^8.4.35", + "postcss-preset-mantine": "^1.13.0", + "postcss-simple-vars": "^7.0.1", + "typescript": "^5.2.2", + "vite": "^5.1.0" + } + }, + "mantine-ui/node_modules/@mantine/code-highlight": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.5.3.tgz", + "integrity": "sha512-TLZSkVAfX3KH9XKjJl965KX6TjpMKtNzObjI6Uvo/J/5Rvqhe7xbhBPJDT7yhSD+wjnTMsEWEb68rmQa3M/cEA==", + "dependencies": { + "clsx": "2.0.0", + "highlight.js": "^11.9.0" + }, + "peerDependencies": { + "@mantine/core": "7.5.3", + "@mantine/hooks": "7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" + } + }, + "mantine-ui/node_modules/@mantine/core": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.5.3.tgz", + "integrity": "sha512-Wvv6DJXI+GX9mmKG5HITTh/24sCZ0RoYQHdTHh0tOfGnEy+RleyhA82UjnMsp0n2NjfCISBwbiKgfya6b2iaFw==", + "dependencies": { + "@floating-ui/react": "^0.24.8", + "clsx": "2.0.0", + 
"react-number-format": "^5.3.1", + "react-remove-scroll": "^2.5.7", + "react-textarea-autosize": "8.5.3", + "type-fest": "^3.13.1" + }, + "peerDependencies": { + "@mantine/hooks": "7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" + } + }, + "mantine-ui/node_modules/@mantine/dates": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.5.3.tgz", + "integrity": "sha512-v6fFdW+7HAd7XsZFMJVMuFE2RHbQAVnsUNeP0/5h+H4qEj0soTmMvHPP8wXEed5v85r9CcEMGOGq1n6RFRpWHA==", + "dependencies": { + "clsx": "2.0.0" + }, + "peerDependencies": { + "@mantine/core": "7.5.3", + "@mantine/hooks": "7.5.3", + "dayjs": ">=1.0.0", + "react": "^18.2.0", + "react-dom": "^18.2.0" + } + }, + "mantine-ui/node_modules/@mantine/hooks": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.5.3.tgz", + "integrity": "sha512-mFI448mAs12v8FrgSVhytqlhTVrEjIfd/PqPEfwJu5YcZIq4YZdqpzJIUbANnRrFSvmoQpDb1PssdKx7Ds35hw==", + "peerDependencies": { + "react": "^18.2.0" + } + }, + "mantine-ui/node_modules/@prometheus-io/codemirror-promql": { + "version": "0.50.0-rc.1", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.50.0-rc.1.tgz", + "integrity": "sha512-UFqVvpjrEn3eqfn8mqiA8SAdUD0MCdwvEiFpjHk5/Bjfbam0u+9RBGLT4cm3HowuluLDQ8G1p/pVGpCcYiQXPA==", + "dependencies": { + "@prometheus-io/lezer-promql": "0.50.0-rc.1", + "lru-cache": "^7.18.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@codemirror/autocomplete": "^6.4.0", + "@codemirror/language": "^6.3.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/state": "^6.1.1", + "@codemirror/view": "^6.4.0", + "@lezer/common": "^1.0.1" + } + }, + "mantine-ui/node_modules/@prometheus-io/lezer-promql": { + "version": "0.50.0-rc.1", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.50.0-rc.1.tgz", + "integrity": 
"sha512-vNHg4WHi1pA3Y+Y5IMjguRVlha8UhjNbQ1JUWffAfUvMubGGPjPZIvBPF8Pk4ONxkkUAPe3rztCEza810AMHoA==", + "peerDependencies": { + "@lezer/highlight": "^1.1.2", + "@lezer/lr": "^1.2.3" + } + }, + "mantine-ui/node_modules/@tanstack/react-query": { + "version": "5.22.2", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.22.2.tgz", + "integrity": "sha512-TaxJDRzJ8/NWRT4lY2jguKCrNI6MRN+67dELzPjNUlvqzTxGANlMp68l7aC7hG8Bd1uHNxHl7ihv7MT50i/43A==", + "dependencies": { + "@tanstack/query-core": "5.22.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^18.0.0" + } + }, + "mantine-ui/node_modules/@types/react": { + "version": "18.2.57", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.57.tgz", + "integrity": "sha512-ZvQsktJgSYrQiMirAN60y4O/LRevIV8hUzSOSNB6gfR3/o3wCBFQx3sPwIYtuDMeiVgsSS3UzCV26tEzgnfvQw==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "mantine-ui/node_modules/@types/react-dom": { + "version": "18.2.19", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", + "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "mantine-ui/node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", 
+ "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "mantine-ui/node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "mantine-ui/node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "mantine-ui/node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "mantine-ui/node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "mantine-ui/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "mantine-ui/node_modules/@typescript-eslint/utils": 
{ + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "mantine-ui/node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "mantine-ui/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "mantine-ui/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "engines": { + "node": ">=12" + } + }, + 
"mantine-ui/node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "mantine-ui/node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "mantine-ui/node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "mantine-ui/node_modules/react-router": { + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.22.1.tgz", + "integrity": "sha512-0pdoRGwLtemnJqn1K0XHUbnKiX0S4X8CgvVVmHGOWmofESj31msHo/1YiqcJWK7Wxfq2a4uvvtS01KAQyWK/CQ==", + "dependencies": { + "@remix-run/router": "1.15.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "mantine-ui/node_modules/react-router-dom": { + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.22.1.tgz", + "integrity": "sha512-iwMyyyrbL7zkKY7MRjOVRy+TMnS/OPusaFVxM2P11x9dzSzGmLsebkCvYirGq0DWB9K9hOspHYYtDz33gE5Duw==", + "dependencies": { + "@remix-run/router": "1.15.1", + "react-router": "6.22.1" + }, + "engines": { + "node": ">=14.0.0" + }, + 
"peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "mantine-ui/node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "mantine-ui/node_modules/semver": { + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "mantine-ui/node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "mantine-ui/node_modules/type-fest": { + "version": "3.13.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-3.13.1.tgz", + "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.49.1", @@ -81,6 +564,15 @@ "@lezer/lr": "^1.2.3" } }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + 
}, "node_modules/@ampproject/remapping": { "version": "2.2.0", "dev": true, @@ -94,11 +586,12 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.22.13", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/highlight": "^7.22.13", + "@babel/highlight": "^7.23.4", "chalk": "^2.4.2" }, "engines": { @@ -170,33 +663,35 @@ } }, "node_modules/@babel/compat-data": { - "version": "7.19.3", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", + "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.19.3", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.9.tgz", + "integrity": "sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==", "dev": true, - "license": "MIT", "dependencies": { - "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.3", - "@babel/helper-compilation-targets": "^7.19.3", - "@babel/helper-module-transforms": "^7.19.0", - "@babel/helpers": "^7.19.0", - "@babel/parser": "^7.19.3", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.19.3", - "@babel/types": "^7.19.3", - "convert-source-map": "^1.7.0", + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.9", + "@babel/parser": "^7.23.9", + "@babel/template": "^7.23.9", + "@babel/traverse": "^7.23.9", + "@babel/types": "^7.23.9", + 
"convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", - "json5": "^2.2.1", - "semver": "^6.3.0" + "json5": "^2.2.3", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -206,6 +701,12 @@ "url": "https://opencollective.com/babel" } }, + "node_modules/@babel/core/node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, "node_modules/@babel/eslint-parser": { "version": "7.19.1", "dev": true, @@ -232,11 +733,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.23.3", + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz", + "integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.23.3", + "@babel/types": "^7.23.6", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -282,22 +784,36 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.19.3", + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", + "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.19.3", - "@babel/helper-validator-option": "^7.18.6", - "browserslist": "^4.21.3", - "semver": "^6.3.0" + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" } }, + 
"node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, "node_modules/@babel/helper-create-class-features-plugin": { "version": "7.19.0", "dev": true, @@ -403,32 +919,34 @@ } }, "node_modules/@babel/helper-module-imports": { - "version": "7.18.6", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.19.0", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", + "integrity": "sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-simple-access": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/helper-validator-identifier": "^7.18.6", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.19.0", - "@babel/types": "^7.19.0" + "@babel/helper-environment-visitor": 
"^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-optimise-call-expression": { @@ -443,9 +961,10 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.19.0", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -483,11 +1002,12 @@ } }, "node_modules/@babel/helper-simple-access": { - "version": "7.18.6", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -516,9 +1036,10 @@ } }, "node_modules/@babel/helper-string-parser": { - "version": "7.22.5", + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", + "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -532,9 +1053,10 @@ } }, "node_modules/@babel/helper-validator-option": { - "version": "7.18.6", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", 
"dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -554,22 +1076,24 @@ } }, "node_modules/@babel/helpers": { - "version": "7.19.0", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", + "integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.19.0", - "@babel/types": "^7.19.0" + "@babel/template": "^7.23.9", + "@babel/traverse": "^7.23.9", + "@babel/types": "^7.23.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.20", + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", "dev": true, - "license": "MIT", "dependencies": { "@babel/helper-validator-identifier": "^7.22.20", "chalk": "^2.4.2", @@ -581,8 +1105,9 @@ }, "node_modules/@babel/highlight/node_modules/ansi-styles": { "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dev": true, - "license": "MIT", "dependencies": { "color-convert": "^1.9.0" }, @@ -592,8 +1117,9 @@ }, "node_modules/@babel/highlight/node_modules/chalk": { "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "dev": true, - "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -605,37 +1131,42 @@ }, "node_modules/@babel/highlight/node_modules/color-convert": { "version": "1.9.3", + "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, - "license": "MIT", "dependencies": { "color-name": "1.1.3" } }, "node_modules/@babel/highlight/node_modules/color-name": { "version": "1.1.3", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true }, "node_modules/@babel/highlight/node_modules/escape-string-regexp": { "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/@babel/highlight/node_modules/has-flag": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/@babel/highlight/node_modules/supports-color": { "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, - "license": "MIT", "dependencies": { "has-flag": "^3.0.0" }, @@ -644,9 +1175,10 @@ } }, "node_modules/@babel/parser": { - "version": "7.23.3", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", + "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", "dev": true, - "license": "MIT", "bin": { "parser": 
"bin/babel-parser.js" }, @@ -951,6 +1483,7 @@ "version": "7.8.3", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1051,6 +1584,7 @@ "version": "7.10.4", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1562,20 +2096,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-constant-elements": { - "version": "7.18.12", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-transform-react-display-name": { "version": "7.18.6", "dev": true, @@ -1622,6 +2142,36 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.23.3.tgz", + "integrity": "sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.23.3.tgz", + "integrity": "sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/plugin-transform-react-pure-annotations": { "version": "7.18.6", "dev": true, @@ -1940,10 +2490,11 @@ } }, 
"node_modules/@babel/runtime": { - "version": "7.19.0", - "license": "MIT", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.9.tgz", + "integrity": "sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==", "dependencies": { - "regenerator-runtime": "^0.13.4" + "regenerator-runtime": "^0.14.0" }, "engines": { "node": ">=6.9.0" @@ -1961,33 +2512,40 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/runtime/node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, "node_modules/@babel/template": { - "version": "7.22.15", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", + "integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.23.9", + "@babel/types": "^7.23.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.23.3", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz", + "integrity": "sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.3", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", "@babel/helper-environment-visitor": "^7.22.20", "@babel/helper-function-name": "^7.23.0", "@babel/helper-hoist-variables": "^7.22.5", "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": 
"^7.23.3", - "@babel/types": "^7.23.3", - "debug": "^4.1.0", + "@babel/parser": "^7.23.9", + "@babel/types": "^7.23.9", + "debug": "^4.3.1", "globals": "^11.1.0" }, "engines": { @@ -2003,11 +2561,12 @@ } }, "node_modules/@babel/types": { - "version": "7.23.3", + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", + "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-string-parser": "^7.23.4", "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, @@ -2018,11 +2577,13 @@ "node_modules/@bcoe/v8-coverage": { "version": "0.2.3", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.11.1", - "license": "MIT", + "version": "6.12.0", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.12.0.tgz", + "integrity": "sha512-r4IjdYFthwbCQyvqnSlx0WBHRHi8nBvU+WjJxFUij81qsBfhNudf/XKKmmC2j3m0LaOYUQTf3qiEK1J8lO1sdg==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -2047,11 +2608,12 @@ } }, "node_modules/@codemirror/language": { - "version": "6.9.3", - "license": "MIT", + "version": "6.10.1", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.1.tgz", + "integrity": "sha512-5GrXzrhq6k+gL5fjkAwt90nYDmjlzTIJV8THnxNFtNKWotMIlzzN+CpqxqwXOECnUdOndmSeWntVrVcv5axWRQ==", "dependencies": { "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", + "@codemirror/view": "^6.23.0", "@lezer/common": "^1.1.0", "@lezer/highlight": "^1.0.0", "@lezer/lr": "^1.0.0", @@ -2059,8 +2621,9 @@ } }, "node_modules/@codemirror/lint": { - "version": "6.4.2", - "license": "MIT", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.5.0.tgz", + "integrity": 
"sha512-+5YyicIaaAZKU8K43IQi8TBy6mF6giGeWAH7N96Z5LC30Wm5JMjqxOYIE9mxwMG1NbhT2mA3l9hA4uuKUM3E5g==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2077,298 +2640,433 @@ } }, "node_modules/@codemirror/state": { - "version": "6.3.3", - "license": "MIT" + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", + "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" + }, + "node_modules/@codemirror/theme-one-dark": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/@codemirror/theme-one-dark/-/theme-one-dark-6.1.2.tgz", + "integrity": "sha512-F+sH0X16j/qFLMAfbciKTxVOwkdAS336b7AXTKOZhy8BR3eH/RelsnLgLFINrpST63mmN2OuwUt0W2ndUgYwUA==", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/highlight": "^1.0.0" + } }, "node_modules/@codemirror/view": { - "version": "6.22.1", - "license": "MIT", + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.24.1.tgz", + "integrity": "sha512-sBfP4rniPBRQzNakwuQEqjEuiJDWJyF2kqLLqij4WXRoVwPPJfjx966Eq3F7+OPQxDtMt/Q9MWLoZLWjeveBlg==", "dependencies": { - "@codemirror/state": "^6.1.4", + "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, - "node_modules/@csstools/normalize.css": { - "version": "12.0.0", + "node_modules/@esbuild/aix-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", + "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", + "cpu": [ + "ppc64" + ], "dev": true, - "license": "CC0-1.0" + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } }, - "node_modules/@csstools/postcss-cascade-layers": { - "version": "1.1.1", + "node_modules/@esbuild/android-arm": { + "version": "0.19.12", + 
"resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", + "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", + "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", + "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", + "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.19.12", + 
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", + "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", + "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", + "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", + "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", + "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.19.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", + "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", + "cpu": [ + "loong64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", + "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", + "cpu": [ + "mips64el" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", + "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", + "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", + "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.19.12", + 
"resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", + "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", + "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", + "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", + "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", + "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.19.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", + "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", + "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", "dev": true, - "license": "CC0-1.0", "dependencies": { - "@csstools/selector-specificity": "^2.0.2", - "postcss-selector-parser": "^6.0.10" + "eslint-visitor-keys": "^3.3.0" }, "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "peerDependencies": { - "postcss": "^8.2" + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, - "node_modules/@csstools/postcss-color-function": { - "version": "1.1.1", + "node_modules/@eslint-community/regexpp": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" - }, "engines": { - "node": "^12 || ^14 || >=16" - 
}, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-font-format-keywords": { - "version": "1.0.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-hwb-function": { - "version": "1.0.2", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-ic-unit": { - "version": "1.0.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-is-pseudo-class": { - "version": "2.0.7", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/selector-specificity": "^2.0.0", - "postcss-selector-parser": "^6.0.10" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-nested-calc": { - "version": "1.0.0", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - 
"type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-normalize-display-values": { - "version": "1.0.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-oklab-function": { - "version": "1.1.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-progressive-custom-properties": { - "version": "1.3.0", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.3" - } - }, - "node_modules/@csstools/postcss-stepped-value-functions": { - "version": "1.0.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-text-decoration-shorthand": { - "version": "1.0.0", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" 
- } - }, - "node_modules/@csstools/postcss-trigonometric-functions": { - "version": "1.0.2", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/postcss-unset-value": { - "version": "1.0.2", - "dev": true, - "license": "CC0-1.0", - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/@csstools/selector-specificity": { - "version": "2.0.2", - "dev": true, - "license": "CC0-1.0", - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2", - "postcss-selector-parser": "^6.0.10" + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, "node_modules/@eslint/eslintrc": { - "version": "1.3.2", + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, - "license": "MIT", "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", - "espree": "^9.4.0", - "globals": "^13.15.0", + "espree": "^9.6.0", + "globals": "^13.19.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.0", @@ -2382,78 +3080,77 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@forevolve/bootstrap-dark": { - "version": "2.1.1", - "license": "MIT", - "dependencies": { - "bootstrap": "^4.6.2", - "jquery": "^3.5.1", - "popper.js": "^1.16.1" - } - }, - "node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.5.1", - "hasInstallScript": true, - "license": "MIT", + 
"node_modules/@eslint/js": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", + "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", + "dev": true, "engines": { - "node": ">=6" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, - "node_modules/@fortawesome/fontawesome-svg-core": { - "version": "6.5.1", - "hasInstallScript": true, - "license": "MIT", + "node_modules/@floating-ui/core": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.0.tgz", + "integrity": "sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g==", "dependencies": { - "@fortawesome/fontawesome-common-types": "6.5.1" - }, - "engines": { - "node": ">=6" + "@floating-ui/utils": "^0.2.1" } }, - "node_modules/@fortawesome/free-solid-svg-icons": { - "version": "6.5.1", - "hasInstallScript": true, - "license": "(CC-BY-4.0 AND MIT)", + "node_modules/@floating-ui/dom": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.3.tgz", + "integrity": "sha512-RnDthu3mzPlQ31Ss/BTwQ1zjzIhr3lk1gZB1OC56h/1vEtaXkESrOqL5fQVMfXpwGtRwX+YsZBdyHtJMQnkArw==", "dependencies": { - "@fortawesome/fontawesome-common-types": "6.5.1" - }, - "engines": { - "node": ">=6" + "@floating-ui/core": "^1.0.0", + "@floating-ui/utils": "^0.2.0" } }, - "node_modules/@fortawesome/react-fontawesome": { - "version": "0.2.0", - "license": "MIT", + "node_modules/@floating-ui/react": { + "version": "0.24.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.24.8.tgz", + "integrity": "sha512-AuYeDoaR8jtUlUXtZ1IJ/6jtBkGnSpJXbGNzokBL87VDJ8opMq1Bgrc0szhK482ReQY6KZsMoZCVSb4xwalkBA==", "dependencies": { - "prop-types": "^15.8.1" + "@floating-ui/react-dom": "^2.0.1", + "aria-hidden": "^1.2.3", + "tabbable": "^6.0.1" }, "peerDependencies": { - "@fortawesome/fontawesome-svg-core": "~1 || ~6", - "react": ">=16.3" + 
"react": ">=16.8.0", + "react-dom": ">=16.8.0" } }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.10.7", - "dev": true, - "license": "Apache-2.0", + "node_modules/@floating-ui/react-dom": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.8.tgz", + "integrity": "sha512-HOdqOt3R3OGeTKidaLvJKcgg75S6tibQ3Tif4eyd91QnIJWr0NLvoXFpJA/j8HqkFSL68GDca9AuyWEHlhyClw==", "dependencies": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", - "minimatch": "^3.0.4" + "@floating-ui/dom": "^1.6.1" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.1.tgz", + "integrity": "sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==" + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" }, "engines": { "node": ">=10.10.0" } }, - "node_modules/@humanwhocodes/gitignore-to-minimatch": { - "version": "1.0.2", - "dev": true, - "license": "Apache-2.0", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "dev": true, @@ -2467,26 +3164,16 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "1.2.1", - "dev": true, - "license": "BSD-3-Clause" - }, - "node_modules/@hypnosphi/create-react-context": { - "version": "0.3.1", - "license": "MIT", - "dependencies": { - "gud": "^1.0.0", - "warning": "^4.0.3" - }, - "peerDependencies": { - 
"prop-types": "^15.0.0", - "react": ">=0.14.0" - } + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", + "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", + "dev": true }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "camelcase": "^5.3.1", "find-up": "^4.1.0", @@ -2502,6 +3189,7 @@ "version": "1.0.10", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "sprintf-js": "~1.0.2" } @@ -2510,6 +3198,7 @@ "version": "4.1.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -2522,6 +3211,7 @@ "version": "3.14.1", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -2534,6 +3224,7 @@ "version": "5.0.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-locate": "^4.1.0" }, @@ -2545,6 +3236,7 @@ "version": "2.3.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-try": "^2.0.0" }, @@ -2559,6 +3251,7 @@ "version": "4.1.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-limit": "^2.2.0" }, @@ -2570,6 +3263,7 @@ "version": "5.0.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -2578,6 +3272,7 @@ "version": "0.1.3", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -3311,6 +4006,8 @@ "version": "0.3.5", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@jridgewell/gen-mapping": "^0.3.0", "@jridgewell/trace-mapping": "^0.3.9" @@ -3320,6 +4017,8 @@ "version": "0.3.3", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", @@ -3343,14 +4042,10 @@ 
"@jridgewell/sourcemap-codec": "^1.4.14" } }, - "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.4", - "dev": true, - "license": "MIT" - }, "node_modules/@lezer/common": { - "version": "1.1.1", - "license": "MIT" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", + "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==" }, "node_modules/@lezer/generator": { "version": "1.5.1", @@ -3378,17 +4073,6 @@ "@lezer/common": "^1.0.0" } }, - "node_modules/@nexucis/fuzzy": { - "version": "0.4.1", - "license": "MIT" - }, - "node_modules/@nexucis/kvsearch": { - "version": "0.8.1", - "license": "MIT", - "dependencies": { - "@nexucis/fuzzy": "^0.4.1" - } - }, "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { "version": "5.1.1-v1", "dev": true, @@ -3449,67 +4133,6 @@ "node": ">= 8" } }, - "node_modules/@pmmmwh/react-refresh-webpack-plugin": { - "version": "0.5.7", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-html-community": "^0.0.8", - "common-path-prefix": "^3.0.0", - "core-js-pure": "^3.8.1", - "error-stack-parser": "^2.0.6", - "find-up": "^5.0.0", - "html-entities": "^2.1.0", - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0", - "source-map": "^0.7.3" - }, - "engines": { - "node": ">= 10.13" - }, - "peerDependencies": { - "@types/webpack": "4.x || 5.x", - "react-refresh": ">=0.10.0 <1.0.0", - "sockjs-client": "^1.4.0", - "type-fest": ">=0.17.0 <3.0.0", - "webpack": ">=4.43.0 <6.0.0", - "webpack-dev-server": "3.x || 4.x", - "webpack-hot-middleware": "2.x", - "webpack-plugin-serve": "0.x || 1.x" - }, - "peerDependenciesMeta": { - "@types/webpack": { - "optional": true - }, - "sockjs-client": { - "optional": true - }, - "type-fest": { - "optional": true - }, - "webpack-dev-server": { - "optional": true - }, - "webpack-hot-middleware": { - "optional": true - }, - "webpack-plugin-serve": { - "optional": true - } - } - }, - 
"node_modules/@pmmmwh/react-refresh-webpack-plugin/node_modules/source-map": { - "version": "0.7.4", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@prometheus-io/app": { - "resolved": "react-app", - "link": true - }, "node_modules/@prometheus-io/codemirror-promql": { "resolved": "module/codemirror-promql", "link": true @@ -3518,79 +4141,233 @@ "resolved": "module/lezer-promql", "link": true }, - "node_modules/@rollup/plugin-babel": { - "version": "5.3.1", + "node_modules/@prometheus-io/mantine-ui": { + "resolved": "mantine-ui", + "link": true + }, + "node_modules/@remix-run/router": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.15.1.tgz", + "integrity": "sha512-zcU0gM3z+3iqj8UX45AmWY810l3oUmXM7uH4dt5xtzvMhRtYVhKGOmgOd1877dOPPepfCjUv57w+syamWIYe7w==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rollup/plugin-node-resolve": { + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz", + "integrity": "sha512-j/lym8nf5E21LwBT4Df1VD6hRO2L2iwUeUmP7litikRsVp1H6NWx20NEp0Y7su+7XGc476GnXXc4kFeZNGmaSQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-module-imports": "^7.10.4", - "@rollup/pluginutils": "^3.1.0" + "@rollup/pluginutils": "^5.0.1", + "@types/resolve": "1.20.2", + "deepmerge": "^4.2.2", + "is-builtin-module": "^3.2.1", + "is-module": "^1.0.0", + "resolve": "^1.22.1" }, "engines": { - "node": ">= 10.0.0" + "node": ">=14.0.0" }, "peerDependencies": { - "@babel/core": "^7.0.0", - "@types/babel__core": "^7.1.9", - "rollup": "^1.20.0||^2.0.0" + "rollup": "^2.78.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { - "@types/babel__core": { + "rollup": { "optional": true } } }, - "node_modules/@rollup/plugin-node-resolve": { - "version": "11.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^3.1.0", - "@types/resolve": "1.17.1", 
- "builtin-modules": "^3.1.0", - "deepmerge": "^4.2.2", - "is-module": "^1.0.0", - "resolve": "^1.19.0" - }, - "engines": { - "node": ">= 10.0.0" - }, - "peerDependencies": { - "rollup": "^1.20.0||^2.0.0" - } - }, - "node_modules/@rollup/plugin-replace": { - "version": "2.4.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^3.1.0", - "magic-string": "^0.25.7" - }, - "peerDependencies": { - "rollup": "^1.20.0 || ^2.0.0" - } - }, "node_modules/@rollup/pluginutils": { - "version": "3.1.0", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.0.tgz", + "integrity": "sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==", "dev": true, - "license": "MIT", "dependencies": { - "@types/estree": "0.0.39", - "estree-walker": "^1.0.1", - "picomatch": "^2.2.2" + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^2.3.1" }, "engines": { - "node": ">= 8.0.0" + "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^1.20.0||^2.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } } }, - "node_modules/@rollup/pluginutils/node_modules/@types/estree": { - "version": "0.0.39", + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz", + "integrity": "sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==", + "cpu": [ + "arm" + ], "dev": true, - "license": "MIT" + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz", + "integrity": "sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==", + 
"cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz", + "integrity": "sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz", + "integrity": "sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz", + "integrity": "sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz", + "integrity": "sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz", + "integrity": "sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + 
"os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz", + "integrity": "sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz", + "integrity": "sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz", + "integrity": "sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz", + "integrity": "sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz", + "integrity": "sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + 
"node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz", + "integrity": "sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] }, "node_modules/@rushstack/eslint-patch": { "version": "1.2.0", @@ -3606,6 +4383,7 @@ "version": "1.8.3", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "type-detect": "4.0.8" } @@ -3614,313 +4392,53 @@ "version": "9.1.2", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "@sinonjs/commons": "^1.7.0" } }, - "node_modules/@sinonjs/samsam": { - "version": "7.0.1", - "dev": true, - "license": "BSD-3-Clause", + "node_modules/@tabler/icons": { + "version": "2.47.0", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-2.47.0.tgz", + "integrity": "sha512-4w5evLh+7FUUiA1GucvGj2ReX2TvOjEr4ejXdwL/bsjoSkof6r1gQmzqI+VHrE2CpJpB3al7bCTulOkFa/RcyA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/codecalm" + } + }, + "node_modules/@tabler/icons-react": { + "version": "2.47.0", + "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-2.47.0.tgz", + "integrity": "sha512-iqly2FvCF/qUbgmvS8E40rVeYY7laltc5GUjRxQj59DuX0x/6CpKHTXt86YlI2whg4czvd/c8Ce8YR08uEku0g==", "dependencies": { - "@sinonjs/commons": "^2.0.0", - "lodash.get": "^4.4.2", - "type-detect": "^4.0.8" - } - }, - "node_modules/@sinonjs/samsam/node_modules/@sinonjs/commons": { - "version": "2.0.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "type-detect": "4.0.8" - } - }, - "node_modules/@sinonjs/text-encoding": { - "version": "0.7.2", - "dev": true, - "license": "(Unlicense OR Apache-2.0)" - }, - "node_modules/@surma/rollup-plugin-off-main-thread": { - "version": "2.2.3", - "dev": true, - "license": "Apache-2.0", - 
"dependencies": { - "ejs": "^3.1.6", - "json5": "^2.2.0", - "magic-string": "^0.25.0", - "string.prototype.matchall": "^4.0.6" - } - }, - "node_modules/@svgr/babel-plugin-add-jsx-attribute": { - "version": "5.4.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" + "@tabler/icons": "2.47.0", + "prop-types": "^15.7.2" }, "funding": { "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { - "version": "5.4.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-svg-dynamic-title": { - "version": "5.4.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-svg-em-dimensions": { - "version": "5.4.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-transform-react-native-svg": { - "version": "5.4.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-plugin-transform-svg-component": { - 
"version": "5.5.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/babel-preset": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@svgr/babel-plugin-add-jsx-attribute": "^5.4.0", - "@svgr/babel-plugin-remove-jsx-attribute": "^5.4.0", - "@svgr/babel-plugin-remove-jsx-empty-expression": "^5.0.1", - "@svgr/babel-plugin-replace-jsx-attribute-value": "^5.0.1", - "@svgr/babel-plugin-svg-dynamic-title": "^5.4.0", - "@svgr/babel-plugin-svg-em-dimensions": "^5.4.0", - "@svgr/babel-plugin-transform-react-native-svg": "^5.4.0", - "@svgr/babel-plugin-transform-svg-component": "^5.5.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/core": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@svgr/plugin-jsx": "^5.5.0", - "camelcase": "^6.2.0", - "cosmiconfig": "^7.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/core/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@svgr/hast-util-to-babel-ast": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.12.6" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/plugin-jsx": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.12.3", - "@svgr/babel-preset": "^5.5.0", - "@svgr/hast-util-to-babel-ast": "^5.5.0", - "svg-parser": "^2.0.2" - }, - "engines": { - 
"node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/plugin-svgo": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "cosmiconfig": "^7.0.0", - "deepmerge": "^4.2.2", - "svgo": "^1.2.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@svgr/webpack": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.12.3", - "@babel/plugin-transform-react-constant-elements": "^7.12.1", - "@babel/preset-env": "^7.12.1", - "@babel/preset-react": "^7.12.5", - "@svgr/core": "^5.5.0", - "@svgr/plugin-jsx": "^5.5.0", - "@svgr/plugin-svgo": "^5.5.0", - "loader-utils": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/gregberge" - } - }, - "node_modules/@testing-library/react-hooks": { - "version": "7.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.5", - "@types/react": ">=16.9.0", - "@types/react-dom": ">=16.9.0", - "@types/react-test-renderer": ">=16.9.0", - "react-error-boundary": "^3.1.0" - }, - "engines": { - "node": ">=12" + "url": "https://github.com/sponsors/codecalm" }, "peerDependencies": { - "react": ">=16.9.0", - "react-dom": ">=16.9.0", - "react-test-renderer": ">=16.9.0" - }, - "peerDependenciesMeta": { - "react-dom": { - "optional": true - }, - "react-test-renderer": { - "optional": true - } + "react": "^16.5.1 || ^17.0.0 || ^18.0.0" } }, - "node_modules/@tootallnate/once": { - "version": "1.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/@trysound/sax": { - "version": "0.2.0", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=10.13.0" + "node_modules/@tanstack/query-core": { + "version": "5.22.2", + "resolved": 
"https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.22.2.tgz", + "integrity": "sha512-z3PwKFUFACMUqe1eyesCIKg3Jv1mysSrYfrEW5ww5DCDUD4zlpTKBvUDaEjsfZzL3ULrFLDM9yVUxI/fega1Qg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@types/babel__core": { - "version": "7.1.19", + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/parser": "^7.1.0", - "@babel/types": "^7.0.0", + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" @@ -3951,140 +4469,27 @@ "@babel/types": "^7.3.0" } }, - "node_modules/@types/body-parser": { - "version": "1.19.2", - "devOptional": true, - "license": "MIT", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/bonjour": { - "version": "3.5.10", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/cheerio": { - "version": "0.22.31", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect": { - "version": "3.4.35", - "devOptional": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/connect-history-api-fallback": { - "version": "1.3.5", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/express-serve-static-core": "*", - "@types/node": "*" - } - }, - "node_modules/@types/enzyme": { - "version": "3.10.18", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/cheerio": "*", - "@types/react": "^16" - } - }, - "node_modules/@types/enzyme/node_modules/@types/react": { - "version": "16.14.42", - "dev": true, - "license": "MIT", - 
"dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, "node_modules/@types/eslint": { - "version": "8.4.6", + "version": "8.56.2", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.2.tgz", + "integrity": "sha512-uQDwm1wFHmbBbCZCqAlq6Do9LYwByNZHWzXppSnay9SuwJ+VRbjkbLABer54kcPnMSlG6Fdiy2yaFXm/z9Z5gw==", "dev": true, - "license": "MIT", "dependencies": { "@types/estree": "*", "@types/json-schema": "*" } }, - "node_modules/@types/eslint-scope": { - "version": "3.7.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, "node_modules/@types/estree": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/express": { - "version": "4.17.14", - "devOptional": true, - "license": "MIT", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.18", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "4.17.31", - "devOptional": true, - "license": "MIT", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - } - }, - "node_modules/@types/flot": { - "version": "0.0.36", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/jquery": "*" - } + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true }, "node_modules/@types/graceful-fs": { "version": "4.1.5", "dev": true, "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/history": { - "version": "4.7.11", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/html-minifier-terser": { - "version": "6.1.0", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/http-proxy": { - "version": "1.17.9", - 
"license": "MIT", + "peer": true, "dependencies": { "@types/node": "*" } @@ -4148,31 +4553,20 @@ "dev": true, "license": "MIT" }, - "node_modules/@types/jquery": { - "version": "3.5.29", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/sizzle": "*" - } - }, "node_modules/@types/json-schema": { - "version": "7.0.11", - "dev": true, - "license": "MIT" + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true }, "node_modules/@types/json5": { "version": "0.0.29", "dev": true, "license": "MIT" }, - "node_modules/@types/mime": { - "version": "3.0.1", - "devOptional": true, - "license": "MIT" - }, "node_modules/@types/node": { "version": "20.10.4", + "dev": true, "license": "MIT", "dependencies": { "undici-types": "~5.26.4" @@ -4186,186 +4580,47 @@ "node_modules/@types/prettier": { "version": "2.7.1", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@types/prop-types": { "version": "15.7.5", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/q": { - "version": "1.5.5", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/qs": { - "version": "6.9.7", - "devOptional": true, - "license": "MIT" - }, - "node_modules/@types/range-parser": { - "version": "1.2.4", "devOptional": true, "license": "MIT" }, "node_modules/@types/react": { "version": "17.0.71", - "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", "csstype": "^3.0.2" } }, - "node_modules/@types/react-copy-to-clipboard": { - "version": "5.0.7", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/react": "*" - } - }, - "node_modules/@types/react-dom": { - "version": "17.0.25", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/react": "^17" - } - }, - 
"node_modules/@types/react-router": { - "version": "5.1.19", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*" - } - }, - "node_modules/@types/react-router-dom": { - "version": "5.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "*" - } - }, - "node_modules/@types/react-test-renderer": { - "version": "18.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/react": "*" - } - }, "node_modules/@types/resolve": { - "version": "1.17.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/sanitize-html": { - "version": "2.9.5", - "dev": true, - "license": "MIT", - "dependencies": { - "htmlparser2": "^8.0.0" - } - }, - "node_modules/@types/sanitize-html/node_modules/htmlparser2": { - "version": "8.0.1", - "dev": true, - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "entities": "^4.3.0" - } + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", + "integrity": "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==", + "dev": true }, "node_modules/@types/scheduler": { "version": "0.16.2", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/serve-index": { - "version": "1.9.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/express": "*" - } - }, - "node_modules/@types/serve-static": { - "version": "1.15.0", "devOptional": true, - "license": "MIT", - "dependencies": { - "@types/mime": "*", - "@types/node": "*" - } - }, - 
"node_modules/@types/sinon": { - "version": "10.0.20", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/sinonjs__fake-timers": "*" - } - }, - "node_modules/@types/sinonjs__fake-timers": { - "version": "8.1.2", - "dev": true, "license": "MIT" }, - "node_modules/@types/sizzle": { - "version": "2.3.3", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/sockjs": { - "version": "0.3.33", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } + "node_modules/@types/semver": { + "version": "7.5.7", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.7.tgz", + "integrity": "sha512-/wdoPq1QqkSj9/QOeKkFquEuPzQbHTWAMPH/PaUMB+JuR31lXhlWXRZ52IpfDYVlDOUBvX09uBrPwxGT1hjNBg==", + "dev": true }, "node_modules/@types/stack-utils": { "version": "2.0.1", "dev": true, "license": "MIT" }, - "node_modules/@types/trusted-types": { - "version": "2.0.2", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/ws": { - "version": "8.5.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, "node_modules/@types/yargs": { "version": "17.0.13", "dev": true, @@ -4621,200 +4876,89 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@webassemblyjs/ast": { - "version": "1.11.6", - "dev": true, - "license": "MIT", + "node_modules/@uiw/codemirror-extensions-basic-setup": { + "version": "4.21.22", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.21.22.tgz", + "integrity": "sha512-Lxq2EitQb/MwbNrMHBmVdSIR96WmaICnYBYeZbLUxmr4kQcbrA6HXqNSNZJ0V4ZihPfKnNs9+g87QK0HsadE6A==", "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.6", - "dev": true, - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.6", - "dev": true, - 
"license": "MIT" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.6", - "dev": true, - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.6", - "dev": true, - "license": "MIT" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.11.6", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.11.6", - "dev": true, - "license": "MIT" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/helper-wasm-section": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-opt": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6", - "@webassemblyjs/wast-printer": "1.11.6" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", 
- "@webassemblyjs/utf8": "1.11.6" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" - } - }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@wojtekmaj/enzyme-adapter-react-17": { - "version": "0.8.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@wojtekmaj/enzyme-adapter-utils": "^0.2.0", - "enzyme-shallow-equal": "^1.0.0", - "has": "^1.0.0", - "prop-types": "^15.7.0", - "react-is": "^17.0.0", - "react-test-renderer": "^17.0.0" + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" }, "funding": { - "url": "https://github.com/wojtekmaj/enzyme-adapter-react-17?sponsor=1" + "url": "https://jaywcjlove.github.io/#/sponsor" }, "peerDependencies": { - "enzyme": "^3.0.0", - "react": "^17.0.0-0", - "react-dom": "^17.0.0-0" + "@codemirror/autocomplete": ">=6.0.0", + "@codemirror/commands": ">=6.0.0", + "@codemirror/language": ">=6.0.0", + "@codemirror/lint": ">=6.0.0", + "@codemirror/search": ">=6.0.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/view": ">=6.0.0" } }, - 
"node_modules/@wojtekmaj/enzyme-adapter-utils": { - "version": "0.2.0", - "dev": true, - "license": "MIT", + "node_modules/@uiw/react-codemirror": { + "version": "4.21.22", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.21.22.tgz", + "integrity": "sha512-VmxU9oRXwcleG2u5Ui2xVXaLVPL8cBuRN3vA41hlu4OQ/ftJb+4p+dBd6bZ+NJKSXm3LufbPGzu8oKwNO4tG4A==", "dependencies": { - "function.prototype.name": "^1.1.0", - "has": "^1.0.0", - "object.fromentries": "^2.0.0", - "prop-types": "^15.7.0" + "@babel/runtime": "^7.18.6", + "@codemirror/commands": "^6.1.0", + "@codemirror/state": "^6.1.1", + "@codemirror/theme-one-dark": "^6.0.0", + "@uiw/codemirror-extensions-basic-setup": "4.21.22", + "codemirror": "^6.0.0" }, "funding": { - "url": "https://github.com/wojtekmaj/enzyme-adapter-utils?sponsor=1" + "url": "https://jaywcjlove.github.io/#/sponsor" }, "peerDependencies": { - "react": "^17.0.0-0" + "@babel/runtime": ">=7.11.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/theme-one-dark": ">=6.0.0", + "@codemirror/view": ">=6.0.0", + "codemirror": ">=6.0.0", + "react": ">=16.8.0", + "react-dom": ">=16.8.0" } }, - "node_modules/@xtuc/ieee754": { + "node_modules/@ungap/structured-clone": { "version": "1.2.0", - "dev": true, - "license": "BSD-3-Clause" + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true }, - "node_modules/@xtuc/long": { - "version": "4.2.2", + "node_modules/@vitejs/plugin-react": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.2.1.tgz", + "integrity": "sha512-oojO9IDc4nCUUi8qIR11KoQm0XFFLIwsRBwHRR4d/88IWghn1y6ckz/bJ8GHDCsYEJee8mDzqtJxh15/cisJNQ==", "dev": true, - "license": "Apache-2.0" - }, - "node_modules/abab": { - "version": "2.0.6", - "dev": true, - "license": "BSD-3-Clause" - }, - 
"node_modules/accepts": { - "version": "1.3.8", - "dev": true, - "license": "MIT", "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" + "@babel/core": "^7.23.5", + "@babel/plugin-transform-react-jsx-self": "^7.23.3", + "@babel/plugin-transform-react-jsx-source": "^7.23.3", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.14.0" }, "engines": { - "node": ">= 0.6" + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0" + } + }, + "node_modules/@vitejs/plugin-react/node_modules/react-refresh": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", + "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" } }, "node_modules/acorn": { @@ -4828,102 +4972,15 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-globals": { - "version": "6.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "acorn": "^7.1.1", - "acorn-walk": "^7.1.1" - } - }, - "node_modules/acorn-globals/node_modules/acorn": { - "version": "7.4.1", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-import-assertions": { - "version": "1.9.0", - "dev": true, - "license": "MIT", - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, - "license": "MIT", "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, - "node_modules/acorn-node": { - "version": "1.8.2", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "acorn": "^7.0.0", - "acorn-walk": "^7.0.0", - "xtend": "^4.0.2" - } - }, - "node_modules/acorn-node/node_modules/acorn": { - 
"version": "7.4.1", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-walk": { - "version": "7.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/address": { - "version": "1.2.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/adjust-sourcemap-loader": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "loader-utils": "^2.0.0", - "regex-parser": "^2.2.11" - }, - "engines": { - "node": ">=8.9" - } - }, - "node_modules/agent-base": { - "version": "6.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, "node_modules/ajv": { "version": "6.12.6", "dev": true, @@ -4939,54 +4996,11 @@ "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/ajv-formats": { - "version": "2.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ajv-formats/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "dev": true, - "license": "MIT", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, "node_modules/ansi-escapes": { "version": "4.3.2", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "type-fest": "^0.21.3" }, @@ -5001,6 +5015,7 @@ "version": "0.21.3", "dev": true, 
"license": "(MIT OR CC0-1.0)", + "peer": true, "engines": { "node": ">=10" }, @@ -5008,17 +5023,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ansi-html-community": { - "version": "0.0.8", - "dev": true, - "engines": [ - "node >= 0.8.0" - ], - "license": "Apache-2.0", - "bin": { - "ansi-html": "bin/ansi-html" - } - }, "node_modules/ansi-regex": { "version": "5.0.1", "dev": true, @@ -5043,7 +5047,9 @@ }, "node_modules/anymatch": { "version": "3.1.2", + "dev": true, "license": "ISC", + "peer": true, "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -5052,15 +5058,22 @@ "node": ">= 8" } }, - "node_modules/arg": { - "version": "5.0.2", - "dev": true, - "license": "MIT" - }, "node_modules/argparse": { "version": "2.0.1", - "dev": true, - "license": "Python-2.0" + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/aria-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.3.tgz", + "integrity": "sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } }, "node_modules/aria-query": { "version": "4.2.2", @@ -5074,11 +5087,6 @@ "node": ">=6.0" } }, - "node_modules/array-flatten": { - "version": "2.1.2", - "dev": true, - "license": "MIT" - }, "node_modules/array-includes": { "version": "3.1.5", "dev": true, @@ -5105,24 +5113,6 @@ "node": ">=8" } }, - "node_modules/array.prototype.filter": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "es-array-method-boxes-properly": "^1.0.0", - "is-string": "^1.0.7" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": 
"https://github.com/sponsors/ljharb" - } - }, "node_modules/array.prototype.flat": { "version": "1.3.0", "dev": true, @@ -5157,84 +5147,11 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/array.prototype.reduce": { - "version": "1.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.2", - "es-array-method-boxes-properly": "^1.0.0", - "is-string": "^1.0.7" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/asap": { - "version": "2.0.6", - "dev": true, - "license": "MIT" - }, "node_modules/ast-types-flow": { "version": "0.0.7", "dev": true, "license": "ISC" }, - "node_modules/async": { - "version": "3.2.4", - "dev": true, - "license": "MIT" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "dev": true, - "license": "MIT" - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "dev": true, - "license": "ISC", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/autoprefixer": { - "version": "10.4.12", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/autoprefixer" - } - ], - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4", - "caniuse-lite": "^1.0.30001407", - "fraction.js": "^4.2.0", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", - "postcss-value-parser": "^4.2.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - }, - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, "node_modules/axe-core": { "version": "4.4.3", "dev": true, @@ -5269,41 +5186,6 @@ "@babel/core": "^7.8.0" } }, - "node_modules/babel-loader": { - "version": "8.2.5", - "dev": true, - "license": "MIT", - "dependencies": { - "find-cache-dir": "^3.3.1", - "loader-utils": 
"^2.0.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "engines": { - "node": ">= 8.9" - }, - "peerDependencies": { - "@babel/core": "^7.0.0", - "webpack": ">=2" - } - }, - "node_modules/babel-loader/node_modules/schema-utils": { - "version": "2.7.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, "node_modules/babel-plugin-dynamic-import-node": { "version": "2.3.3", "dev": true, @@ -5316,6 +5198,7 @@ "version": "6.1.1", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", "@istanbuljs/load-nyc-config": "^1.0.0", @@ -5356,14 +5239,6 @@ "npm": ">=6" } }, - "node_modules/babel-plugin-named-asset-import": { - "version": "0.3.8", - "dev": true, - "license": "MIT", - "peerDependencies": { - "@babel/core": "^7.1.0" - } - }, "node_modules/babel-plugin-polyfill-corejs2": { "version": "0.3.3", "dev": true, @@ -5409,6 +5284,7 @@ "version": "1.0.1", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-bigint": "^7.8.3", @@ -5471,134 +5347,16 @@ "dev": true, "license": "MIT" }, - "node_modules/batch": { - "version": "0.6.1", - "dev": true, - "license": "MIT" - }, - "node_modules/bfj": { - "version": "7.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "bluebird": "^3.5.5", - "check-types": "^11.1.1", - "hoopy": "^0.1.4", - "tryer": "^1.0.1" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "node_modules/big.js": { - "version": "5.2.2", - "dev": true, - "license": "MIT", - "engines": { - "node": "*" - } - }, "node_modules/binary-extensions": { "version": "2.2.0", + "dev": true, "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">=8" } }, - 
"node_modules/bluebird": { - "version": "3.7.2", - "dev": true, - "license": "MIT" - }, - "node_modules/body-parser": { - "version": "1.20.0", - "dev": true, - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.10.3", - "raw-body": "2.5.1", - "type-is": "~1.6.18", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/body-parser/node_modules/iconv-lite": { - "version": "0.4.24", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/body-parser/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/bonjour-service": { - "version": "1.0.14", - "dev": true, - "license": "MIT", - "dependencies": { - "array-flatten": "^2.1.2", - "dns-equal": "^1.0.0", - "fast-deep-equal": "^3.1.3", - "multicast-dns": "^7.2.5" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "dev": true, - "license": "ISC" - }, - "node_modules/bootstrap": { - "version": "4.6.2", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/twbs" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/bootstrap" - } - ], - "license": "MIT", - "peerDependencies": { - "jquery": "1.9.1 - 3", - "popper.js": "^1.16.1" - } - }, "node_modules/brace-expansion": { "version": "1.1.11", "dev": true, @@ -5610,6 +5368,7 @@ }, "node_modules/braces": { "version": "3.0.2", + "dev": true, 
"license": "MIT", "dependencies": { "fill-range": "^7.0.1" @@ -5618,13 +5377,10 @@ "node": ">=8" } }, - "node_modules/browser-process-hrtime": { - "version": "1.0.0", - "dev": true, - "license": "BSD-2-Clause" - }, "node_modules/browserslist": { - "version": "4.21.4", + "version": "4.23.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", + "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", "dev": true, "funding": [ { @@ -5634,14 +5390,17 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001400", - "electron-to-chromium": "^1.4.251", - "node-releases": "^2.0.6", - "update-browserslist-db": "^1.0.9" + "caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" }, "bin": { "browserslist": "cli.js" @@ -5665,6 +5424,7 @@ "version": "2.1.1", "dev": true, "license": "Apache-2.0", + "peer": true, "dependencies": { "node-int64": "^0.4.0" } @@ -5672,12 +5432,14 @@ "node_modules/buffer-from": { "version": "1.1.2", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/builtin-modules": { "version": "3.3.0", + "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.3.0.tgz", + "integrity": "sha512-zhaCDicdLuWN5UbN5IMnFqNMhNfo919sH85y2/ea+5Yg9TsTkeZxpL+JLbp6cgYFS4sRLp3YV4S6yDuqVWHYOw==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" }, @@ -5685,16 +5447,9 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/bytes": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, "node_modules/call-bind": { "version": "1.0.2", + "dev": true, "license": "MIT", "dependencies": { "function-bind": "^1.1.1", @@ -5712,19 
+5467,11 @@ "node": ">=6" } }, - "node_modules/camel-case": { - "version": "4.1.2", - "dev": true, - "license": "MIT", - "dependencies": { - "pascal-case": "^3.1.2", - "tslib": "^2.0.3" - } - }, "node_modules/camelcase": { "version": "5.3.1", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -5737,19 +5484,10 @@ "node": ">= 6" } }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, "node_modules/caniuse-lite": { - "version": "1.0.30001414", + "version": "1.0.30001588", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001588.tgz", + "integrity": "sha512-+hVY9jE44uKLkH0SrUTqxjxqNTOWHsbnQDIKjwkZ3lNTzUUVdBLBGXtj/q5Mp5u98r3droaZAewQuEDzjQdZlQ==", "dev": true, "funding": [ { @@ -5759,17 +5497,12 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } - ], - "license": "CC-BY-4.0" - }, - "node_modules/case-sensitive-paths-webpack-plugin": { - "version": "2.4.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } + ] }, "node_modules/chalk": { "version": "4.1.2", @@ -5790,71 +5523,14 @@ "version": "1.0.2", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" } }, - "node_modules/check-types": { - "version": "11.1.2", - "dev": true, - "license": "MIT" - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.12", - "dev": true, - "license": "MIT", - "dependencies": { - "cheerio-select": "^2.1.0", - "dom-serializer": "^2.0.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "htmlparser2": "^8.0.1", - "parse5": "^7.0.0", - "parse5-htmlparser2-tree-adapter": "^7.0.0" - }, - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/cheeriojs/cheerio?sponsor=1" - } - }, - 
"node_modules/cheerio-select": { - "version": "2.1.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-select": "^5.1.0", - "css-what": "^6.1.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/cheerio/node_modules/htmlparser2": { - "version": "8.0.1", - "dev": true, - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "entities": "^4.3.0" - } - }, "node_modules/chokidar": { "version": "3.5.3", + "dev": true, "funding": [ { "type": "individual", @@ -5862,6 +5538,8 @@ } ], "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -5880,7 +5558,10 @@ }, "node_modules/chokidar/node_modules/glob-parent": { "version": "5.1.2", + "dev": true, "license": "ISC", + "optional": true, + "peer": true, "dependencies": { "is-glob": "^4.0.1" }, @@ -5888,14 +5569,6 @@ "node": ">= 6" } }, - "node_modules/chrome-trace-event": { - "version": "1.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0" - } - }, "node_modules/ci-info": { "version": "3.4.0", "dev": true, @@ -5904,22 +5577,8 @@ "node_modules/cjs-module-lexer": { "version": "1.2.2", "dev": true, - "license": "MIT" - }, - "node_modules/classnames": { - "version": "2.3.2", - "license": "MIT" - }, - "node_modules/clean-css": { - "version": "5.3.1", - "dev": true, "license": "MIT", - "dependencies": { - "source-map": "~0.6.0" - }, - "engines": { - "node": ">= 10.0" - } + "peer": true }, "node_modules/cliui": { "version": "8.0.1", @@ -5935,96 +5594,43 @@ "node": ">=12" } }, + "node_modules/clsx": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", + "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "engines": { + "node": ">=6" + } + }, "node_modules/co": { "version": "4.6.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "iojs": ">= 1.0.0", "node": ">= 0.12.0" } }, - "node_modules/coa": { - "version": "2.0.2", - "dev": true, - "license": "MIT", + "node_modules/codemirror": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz", + "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==", "dependencies": { - "@types/q": "^1.5.1", - "chalk": "^2.4.1", - "q": "^1.1.2" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/coa/node_modules/ansi-styles": { - "version": "3.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/coa/node_modules/chalk": { - "version": "2.4.2", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/coa/node_modules/color-convert": { - "version": "1.9.3", - "dev": true, - "license": "MIT", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/coa/node_modules/color-name": { - "version": "1.1.3", - "dev": true, - "license": "MIT" - }, - "node_modules/coa/node_modules/escape-string-regexp": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/coa/node_modules/has-flag": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/coa/node_modules/supports-color": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^3.0.0" - }, - 
"engines": { - "node": ">=4" + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" } }, "node_modules/collect-v8-coverage": { "version": "1.0.1", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/color-convert": { "version": "2.0.1", @@ -6042,98 +5648,6 @@ "dev": true, "license": "MIT" }, - "node_modules/colord": { - "version": "2.9.3", - "dev": true, - "license": "MIT" - }, - "node_modules/colorette": { - "version": "2.0.19", - "dev": true, - "license": "MIT" - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "dev": true, - "license": "MIT", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "8.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 12" - } - }, - "node_modules/common-path-prefix": { - "version": "3.0.0", - "dev": true, - "license": "ISC" - }, - "node_modules/common-tags": { - "version": "1.8.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/commondir": { - "version": "1.0.1", - "dev": true, - "license": "MIT" - }, - "node_modules/compressible": { - "version": "2.0.18", - "dev": true, - "license": "MIT", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "dev": true, - "license": "MIT", - "dependencies": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - 
"node_modules/compression/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/compute-scroll-into-view": { - "version": "2.0.4", - "license": "MIT" - }, "node_modules/concat-map": { "version": "0.0.1", "dev": true, @@ -6144,90 +5658,15 @@ "dev": true, "license": "MIT" }, - "node_modules/connect-history-api-fallback": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/content-disposition": { - "version": "0.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-disposition/node_modules/safe-buffer": { - "version": "5.2.1", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/content-type": { - "version": "1.0.4", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, "node_modules/convert-source-map": { "version": "1.8.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "safe-buffer": "~5.1.1" } }, - "node_modules/cookie": { - "version": "0.5.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "dev": true, - "license": "MIT" - }, - "node_modules/copy-to-clipboard": { - "version": "3.3.2", - "license": "MIT", - "dependencies": { - "toggle-selection": "^1.0.6" - } - }, - "node_modules/core-js": { - "version": "3.25.4", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, "node_modules/core-js-compat": { "version": "3.25.4", "dev": true, @@ -6250,11 +5689,6 @@ "url": 
"https://opencollective.com/core-js" } }, - "node_modules/core-util-is": { - "version": "1.0.3", - "dev": true, - "license": "MIT" - }, "node_modules/cosmiconfig": { "version": "7.0.1", "dev": true, @@ -6295,254 +5729,6 @@ "node": ">= 8" } }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/css-blank-pseudo": { - "version": "3.0.3", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "bin": { - "css-blank-pseudo": "dist/cli.cjs" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-declaration-sorter": { - "version": "6.3.1", - "dev": true, - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >=14" - }, - "peerDependencies": { - "postcss": "^8.0.9" - } - }, - "node_modules/css-has-pseudo": { - "version": "3.0.4", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "bin": { - "css-has-pseudo": "dist/cli.cjs" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-loader": { - "version": "6.7.1", - "dev": true, - "license": "MIT", - "dependencies": { - "icss-utils": "^5.1.0", - "postcss": "^8.4.7", - "postcss-modules-extract-imports": "^3.0.0", - "postcss-modules-local-by-default": "^4.0.0", - "postcss-modules-scope": "^3.0.0", - "postcss-modules-values": "^4.0.0", - "postcss-value-parser": "^4.2.0", - "semver": "^7.3.5" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/css-loader/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - 
}, - "engines": { - "node": ">=10" - } - }, - "node_modules/css-minimizer-webpack-plugin": { - "version": "3.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "cssnano": "^5.0.6", - "jest-worker": "^27.0.2", - "postcss": "^8.3.5", - "schema-utils": "^4.0.0", - "serialize-javascript": "^6.0.0", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "@parcel/css": { - "optional": true - }, - "clean-css": { - "optional": true - }, - "csso": { - "optional": true - }, - "esbuild": { - "optional": true - } - } - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/css-prefers-color-scheme": { - "version": "6.0.3", - "dev": true, - "license": "CC0-1.0", - "bin": { - 
"css-prefers-color-scheme": "dist/cli.cjs" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/css-select": { - "version": "5.1.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-select-base-adapter": { - "version": "0.1.1", - "dev": true, - "license": "MIT" - }, - "node_modules/css-tree": { - "version": "1.0.0-alpha.37", - "dev": true, - "license": "MIT", - "dependencies": { - "mdn-data": "2.0.4", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/css-what": { - "version": "6.1.0", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css.escape": { - "version": "1.5.1", - "license": "MIT" - }, - "node_modules/cssdb": { - "version": "7.0.1", - "dev": true, - "license": "CC0-1.0", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - } - }, "node_modules/cssesc": { "version": "3.0.0", "dev": true, @@ -6559,131 +5745,9 @@ "dev": true, "license": "MIT" }, - "node_modules/cssnano": { - "version": "5.1.13", - "dev": true, - "license": "MIT", - "dependencies": { - "cssnano-preset-default": "^5.2.12", - "lilconfig": "^2.0.3", - "yaml": "^1.10.2" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/cssnano" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-preset-default": { - "version": "5.2.12", - "dev": true, - "license": "MIT", - "dependencies": { - "css-declaration-sorter": "^6.3.0", - "cssnano-utils": "^3.1.0", - "postcss-calc": "^8.2.3", - "postcss-colormin": 
"^5.3.0", - "postcss-convert-values": "^5.1.2", - "postcss-discard-comments": "^5.1.2", - "postcss-discard-duplicates": "^5.1.0", - "postcss-discard-empty": "^5.1.1", - "postcss-discard-overridden": "^5.1.0", - "postcss-merge-longhand": "^5.1.6", - "postcss-merge-rules": "^5.1.2", - "postcss-minify-font-values": "^5.1.0", - "postcss-minify-gradients": "^5.1.1", - "postcss-minify-params": "^5.1.3", - "postcss-minify-selectors": "^5.2.1", - "postcss-normalize-charset": "^5.1.0", - "postcss-normalize-display-values": "^5.1.0", - "postcss-normalize-positions": "^5.1.1", - "postcss-normalize-repeat-style": "^5.1.1", - "postcss-normalize-string": "^5.1.0", - "postcss-normalize-timing-functions": "^5.1.0", - "postcss-normalize-unicode": "^5.1.0", - "postcss-normalize-url": "^5.1.0", - "postcss-normalize-whitespace": "^5.1.1", - "postcss-ordered-values": "^5.1.3", - "postcss-reduce-initial": "^5.1.0", - "postcss-reduce-transforms": "^5.1.0", - "postcss-svgo": "^5.1.0", - "postcss-unique-selectors": "^5.1.1" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/cssnano-utils": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/csso": { - "version": "4.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "css-tree": "^1.1.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/css-tree": { - "version": "1.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/mdn-data": { - "version": "2.0.14", - "dev": true, - "license": "CC0-1.0" - }, - "node_modules/cssom": { - "version": "0.4.4", - "dev": true, - "license": "MIT" - }, - "node_modules/cssstyle": { - "version": "2.3.0", - "dev": true, - 
"license": "MIT", - "dependencies": { - "cssom": "~0.3.6" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cssstyle/node_modules/cssom": { - "version": "0.3.8", - "dev": true, - "license": "MIT" - }, "node_modules/csstype": { "version": "3.1.1", - "dev": true, + "devOptional": true, "license": "MIT" }, "node_modules/damerau-levenshtein": { @@ -6691,18 +5755,10 @@ "dev": true, "license": "BSD-2-Clause" }, - "node_modules/data-urls": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "abab": "^2.0.3", - "whatwg-mimetype": "^2.3.0", - "whatwg-url": "^8.0.0" - }, - "engines": { - "node": ">=10" - } + "node_modules/dayjs": { + "version": "1.11.10", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.10.tgz", + "integrity": "sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ==" }, "node_modules/debug": { "version": "4.3.4", @@ -6720,30 +5776,11 @@ } } }, - "node_modules/decimal.js": { - "version": "10.4.2", - "dev": true, - "license": "MIT" - }, "node_modules/dedent": { "version": "0.7.0", "dev": true, - "license": "MIT" - }, - "node_modules/deep-equal": { - "version": "1.1.1", "license": "MIT", - "dependencies": { - "is-arguments": "^1.0.4", - "is-date-object": "^1.0.1", - "is-regex": "^1.0.4", - "object-is": "^1.0.1", - "object-keys": "^1.1.1", - "regexp.prototype.flags": "^1.2.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } + "peer": true }, "node_modules/deep-is": { "version": "0.1.4", @@ -6752,32 +5789,15 @@ }, "node_modules/deepmerge": { "version": "4.2.2", + "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" } }, - "node_modules/default-gateway": { - "version": "6.0.3", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "execa": "^5.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, 
"node_modules/define-properties": { "version": "1.1.4", + "dev": true, "license": "MIT", "dependencies": { "has-property-descriptors": "^1.0.0", @@ -6790,114 +5810,19 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/defined": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/depd": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/destroy": { - "version": "1.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, "node_modules/detect-newline": { "version": "3.1.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } }, - "node_modules/detect-node": { - "version": "2.1.0", - "dev": true, - "license": "MIT" - }, - "node_modules/detect-port-alt": { - "version": "1.1.6", - "dev": true, - "license": "MIT", - "dependencies": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "bin": { - "detect": "bin/detect-port", - "detect-port": "bin/detect-port" - }, - "engines": { - "node": ">= 4.2.1" - } - }, - "node_modules/detect-port-alt/node_modules/debug": { - "version": "2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/detect-port-alt/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/detective": { - "version": "5.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "acorn-node": "^1.8.2", - "defined": "^1.0.0", - "minimist": "^1.2.6" - }, - "bin": { - "detective": "bin/detective.js" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/didyoumean": { - "version": "1.2.2", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/diff": { - "version": "5.1.0", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - 
"node": ">=0.3.1" - } - }, - "node_modules/diff-sequences": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" }, "node_modules/dir-glob": { "version": "3.0.1", @@ -6910,32 +5835,6 @@ "node": ">=8" } }, - "node_modules/discontinuous-range": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/dlv": { - "version": "1.1.3", - "dev": true, - "license": "MIT" - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/dns-packet": { - "version": "5.4.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/doctrine": { "version": "3.0.0", "dev": true, @@ -6947,151 +5846,11 @@ "node": ">=6.0.0" } }, - "node_modules/dom-converter": { - "version": "0.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-helpers": { - "version": "3.4.0", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2" - } - }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.3.0", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "BSD-2-Clause" - }, - "node_modules/domexception": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "webidl-conversions": "^5.0.0" - }, - 
"engines": { - "node": ">=8" - } - }, - "node_modules/domexception/node_modules/webidl-conversions": { - "version": "5.0.0", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=8" - } - }, - "node_modules/domhandler": { - "version": "5.0.3", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.3.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/domutils": { - "version": "3.0.1", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/dot-case": { - "version": "3.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, - "node_modules/dotenv": { - "version": "10.0.0", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=10" - } - }, - "node_modules/dotenv-expand": { - "version": "5.1.0", - "dev": true, - "license": "BSD-2-Clause" - }, - "node_modules/downshift": { - "version": "7.6.2", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.14.8", - "compute-scroll-into-view": "^2.0.4", - "prop-types": "^15.7.2", - "react-is": "^17.0.2", - "tslib": "^2.3.0" - }, - "peerDependencies": { - "react": ">=16.12.0" - } - }, - "node_modules/duplexer": { - "version": "0.1.2", - "dev": true, - "license": "MIT" - }, - "node_modules/ee-first": { - "version": "1.1.1", - "dev": true, - "license": "MIT" - }, - "node_modules/ejs": { - "version": "3.1.8", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "jake": "^10.8.5" - }, - "bin": { - "ejs": "bin/cli.js" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/electron-to-chromium": { - "version": "1.4.270", - "dev": true, - "license": "ISC" + "version": "1.4.677", + "resolved": 
"https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.677.tgz", + "integrity": "sha512-erDa3CaDzwJOpyvfKhOiJjBVNnMM0qxHq47RheVVwsSQrgBA9ZSGV9kdaOfZDPXcHzhG7lBxhj6A7KvfLJBd6Q==", + "dev": true }, "node_modules/emittery": { "version": "0.13.1", @@ -7110,109 +5869,6 @@ "dev": true, "license": "MIT" }, - "node_modules/emojis-list": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/enhanced-resolve": { - "version": "5.15.0", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/entities": { - "version": "4.4.0", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/enzyme": { - "version": "3.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "array.prototype.flat": "^1.2.3", - "cheerio": "^1.0.0-rc.3", - "enzyme-shallow-equal": "^1.0.1", - "function.prototype.name": "^1.1.2", - "has": "^1.0.3", - "html-element-map": "^1.2.0", - "is-boolean-object": "^1.0.1", - "is-callable": "^1.1.5", - "is-number-object": "^1.0.4", - "is-regex": "^1.0.5", - "is-string": "^1.0.5", - "is-subset": "^0.1.1", - "lodash.escape": "^4.0.1", - "lodash.isequal": "^4.5.0", - "object-inspect": "^1.7.0", - "object-is": "^1.0.2", - "object.assign": "^4.1.0", - "object.entries": "^1.1.1", - "object.values": "^1.1.1", - "raf": "^3.4.1", - "rst-selector-parser": "^2.2.3", - "string.prototype.trim": "^1.2.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/enzyme-shallow-equal": { - "version": "1.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "has": "^1.0.3", - "object-is": "^1.1.2" - }, - 
"funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/enzyme-to-json": { - "version": "3.6.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/cheerio": "^0.22.22", - "lodash": "^4.17.21", - "react-is": "^16.12.0" - }, - "engines": { - "node": ">=6.0.0" - }, - "peerDependencies": { - "enzyme": "^3.4.0" - } - }, - "node_modules/enzyme-to-json/node_modules/react-is": { - "version": "16.13.1", - "dev": true, - "license": "MIT" - }, "node_modules/error-ex": { "version": "1.3.2", "dev": true, @@ -7221,14 +5877,6 @@ "is-arrayish": "^0.2.1" } }, - "node_modules/error-stack-parser": { - "version": "2.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "stackframe": "^1.3.4" - } - }, "node_modules/es-abstract": { "version": "1.20.3", "dev": true, @@ -7266,16 +5914,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/es-array-method-boxes-properly": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/es-module-lexer": { - "version": "1.3.1", - "dev": true, - "license": "MIT" - }, "node_modules/es-shim-unscopables": { "version": "1.0.0", "dev": true, @@ -7300,6 +5938,44 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/esbuild": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + 
"@esbuild/linux-ia32": "0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" + } + }, "node_modules/escalade": { "version": "3.1.1", "dev": true, @@ -7308,13 +5984,9 @@ "node": ">=6" } }, - "node_modules/escape-html": { - "version": "1.0.3", - "dev": true, - "license": "MIT" - }, "node_modules/escape-string-regexp": { "version": "4.0.0", + "dev": true, "license": "MIT", "engines": { "node": ">=10" @@ -7323,116 +5995,49 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/escodegen": { - "version": "2.0.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esprima": "^4.0.1", - "estraverse": "^5.2.0", - "esutils": "^2.0.2", - "optionator": "^0.8.1" - }, - "bin": { - "escodegen": "bin/escodegen.js", - "esgenerate": "bin/esgenerate.js" - }, - "engines": { - "node": ">=6.0" - }, - "optionalDependencies": { - "source-map": "~0.6.1" - } - }, - "node_modules/escodegen/node_modules/levn": { - "version": "0.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/escodegen/node_modules/optionator": { - "version": "0.8.3", - "dev": true, - "license": "MIT", - "dependencies": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/escodegen/node_modules/prelude-ls": { - "version": "1.1.2", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, - 
"node_modules/escodegen/node_modules/type-check": { - "version": "0.3.2", - "dev": true, - "license": "MIT", - "dependencies": { - "prelude-ls": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, "node_modules/eslint": { - "version": "8.24.0", + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.56.0.tgz", + "integrity": "sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==", "dev": true, - "license": "MIT", "dependencies": { - "@eslint/eslintrc": "^1.3.2", - "@humanwhocodes/config-array": "^0.10.5", - "@humanwhocodes/gitignore-to-minimatch": "^1.0.2", + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.56.0", + "@humanwhocodes/config-array": "^0.11.13", "@humanwhocodes/module-importer": "^1.0.1", - "ajv": "^6.10.0", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.3.2", "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.1.1", - "eslint-utils": "^3.0.0", - "eslint-visitor-keys": "^3.3.0", - "espree": "^9.4.0", - "esquery": "^1.4.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^6.0.1", "find-up": "^5.0.0", - "glob-parent": "^6.0.1", - "globals": "^13.15.0", - "globby": "^11.1.0", - "grapheme-splitter": "^1.0.4", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", "ignore": "^5.2.0", - "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", - "js-sdsl": "^4.1.4", + "is-path-inside": "^3.0.3", "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "regexpp": "^3.2.0", + 
"optionator": "^0.9.3", "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", "text-table": "^0.2.0" }, "bin": { @@ -7698,6 +6303,15 @@ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" } }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.5.tgz", + "integrity": "sha512-D53FYKJa+fDmZMtriODxvhwrO+IOqrxoEo21gMA0sjHdU6dPVH4OhyFip9ypl8HOF5RV5KdTo+rBQLvnY2cO8w==", + "dev": true, + "peerDependencies": { + "eslint": ">=7" + } + }, "node_modules/eslint-plugin-react/node_modules/doctrine": { "version": "2.1.0", "dev": true, @@ -7741,15 +6355,19 @@ } }, "node_modules/eslint-scope": { - "version": "7.1.1", + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint-utils": { @@ -7778,120 +6396,26 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "3.3.0", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - } - }, - "node_modules/eslint-webpack-plugin": { - "version": "3.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/eslint": "^7.29.0 || ^8.4.1", - "jest-worker": "^28.0.2", - "micromatch": "^4.0.5", - "normalize-path": "^3.0.0", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" }, "funding": { - 
"type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0", - "webpack": "^5.0.0" - } - }, - "node_modules/eslint-webpack-plugin/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/eslint-webpack-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/eslint-webpack-plugin/node_modules/jest-worker": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/eslint-webpack-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/eslint-webpack-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/eslint-webpack-plugin/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" + "url": "https://opencollective.com/eslint" } }, "node_modules/espree": { - "version": "9.4.0", + "version": 
"9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { - "acorn": "^8.8.0", + "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.3.0" + "eslint-visitor-keys": "^3.4.1" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -7904,6 +6428,7 @@ "version": "4.0.1", "dev": true, "license": "BSD-2-Clause", + "peer": true, "bin": { "esparse": "bin/esparse.js", "esvalidate": "bin/esvalidate.js" @@ -7913,9 +6438,10 @@ } }, "node_modules/esquery": { - "version": "1.4.0", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", "dev": true, - "license": "BSD-3-Clause", "dependencies": { "estraverse": "^5.1.0" }, @@ -7943,9 +6469,10 @@ } }, "node_modules/estree-walker": { - "version": "1.0.1", - "dev": true, - "license": "MIT" + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "dev": true }, "node_modules/esutils": { "version": "2.0.3", @@ -7955,30 +6482,11 @@ "node": ">=0.10.0" } }, - "node_modules/etag": { - "version": "1.8.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "license": "MIT" - }, - "node_modules/events": { - "version": "3.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.x" - } - }, "node_modules/execa": { "version": "5.1.1", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", @@ -8000,6 +6508,7 @@ "node_modules/exit": { "version": "0.1.2", "dev": 
true, + "peer": true, "engines": { "node": ">= 0.8.0" } @@ -8108,89 +6617,6 @@ "dev": true, "license": "MIT" }, - "node_modules/express": { - "version": "4.18.1", - "dev": true, - "license": "MIT", - "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.0", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.5.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.2.0", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.7", - "qs": "6.10.3", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/array-flatten": { - "version": "1.1.1", - "dev": true, - "license": "MIT" - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/express/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "dev": true, - "license": "MIT" - }, - "node_modules/express/node_modules/safe-buffer": { - "version": "5.2.1", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, "node_modules/fast-deep-equal": { "version": "3.1.3", "dev": true, @@ -8245,21 +6671,11 @@ "reusify": "^1.0.4" } }, 
- "node_modules/faye-websocket": { - "version": "0.11.4", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, "node_modules/fb-watchman": { "version": "2.0.2", "dev": true, "license": "Apache-2.0", + "peer": true, "dependencies": { "bser": "2.1.1" } @@ -8275,62 +6691,9 @@ "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/file-loader": { - "version": "6.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/filelist": { - "version": "1.0.4", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "minimatch": "^5.0.1" - } - }, - "node_modules/filelist/node_modules/brace-expansion": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/filelist/node_modules/minimatch": { - "version": "5.1.0", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/filesize": { - "version": "8.0.7", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">= 0.4.0" - } - }, "node_modules/fill-range": { "version": "7.0.1", + "dev": true, "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" @@ -8339,52 +6702,6 @@ "node": ">=8" } }, - "node_modules/finalhandler": { - "version": "1.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": 
"2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/finalhandler/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/find-cache-dir": { - "version": "3.3.2", - "dev": true, - "license": "MIT", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/avajs/find-cache-dir?sponsor=1" - } - }, "node_modules/find-up": { "version": "5.0.0", "dev": true, @@ -8417,189 +6734,6 @@ "dev": true, "license": "ISC" }, - "node_modules/follow-redirects": { - "version": "1.15.2", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "license": "MIT", - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/fork-ts-checker-webpack-plugin": { - "version": "6.5.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "glob": "^7.1.6", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=10", - "yarn": ">=1.0.0" - }, - "peerDependencies": { - "eslint": ">= 6", - "typescript": ">= 2.7", - "vue-template-compiler": "*", - "webpack": ">= 4" - }, - "peerDependenciesMeta": { - "eslint": { - "optional": true - }, - "vue-template-compiler": { - "optional": true - } - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { - "version": "6.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.7.2" - }, - "engines": { - 
"node": ">=8" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { - "version": "9.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { - "version": "2.7.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { - "version": "1.1.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/form-data": { - "version": "3.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fraction.js": { - "version": "4.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "*" - }, - "funding": { - "type": "patreon", - "url": "https://www.patreon.com/infusion" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fs-extra": { - "version": "10.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - 
"universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/fs-monkey": { - "version": "1.0.3", - "dev": true, - "license": "Unlicense" - }, "node_modules/fs.realpath": { "version": "1.0.0", "dev": true, @@ -8607,6 +6741,7 @@ }, "node_modules/fsevents": { "version": "2.3.3", + "dev": true, "license": "MIT", "optional": true, "os": [ @@ -8618,6 +6753,7 @@ }, "node_modules/function-bind": { "version": "1.1.1", + "dev": true, "license": "MIT" }, "node_modules/function.prototype.name": { @@ -8639,6 +6775,7 @@ }, "node_modules/functions-have-names": { "version": "1.2.3", + "dev": true, "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" @@ -8656,12 +6793,14 @@ "version": "2.0.5", "dev": true, "license": "ISC", + "peer": true, "engines": { "node": "6.* || 8.* || >= 10.*" } }, "node_modules/get-intrinsic": { "version": "1.1.3", + "dev": true, "license": "MIT", "dependencies": { "function-bind": "^1.1.1", @@ -8672,15 +6811,19 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/get-own-enumerable-property-symbols": { - "version": "3.0.2", - "dev": true, - "license": "ISC" + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "engines": { + "node": ">=6" + } }, "node_modules/get-package-type": { "version": "0.1.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8.0.0" } @@ -8689,6 +6832,7 @@ "version": "6.0.1", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8741,50 +6885,11 @@ "node": ">=10.13.0" } }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "dev": true, - "license": "BSD-2-Clause" - }, - "node_modules/global-modules": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "global-prefix": "^3.0.0" - }, - "engines": { - 
"node": ">=6" - } - }, - "node_modules/global-prefix": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/global-prefix/node_modules/which": { - "version": "1.3.1", - "dev": true, - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, "node_modules/globals": { - "version": "13.17.0", + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, - "license": "MIT", "dependencies": { "type-fest": "^0.20.2" }, @@ -8819,41 +6924,15 @@ "dev": true, "license": "ISC" }, - "node_modules/grapheme-splitter": { - "version": "1.0.4", - "dev": true, - "license": "MIT" - }, - "node_modules/gud": { - "version": "1.0.0", - "license": "MIT" - }, - "node_modules/gzip-size": { - "version": "6.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "duplexer": "^0.1.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/handle-thing": { - "version": "2.0.1", - "dev": true, - "license": "MIT" - }, - "node_modules/harmony-reflect": { - "version": "1.6.2", - "dev": true, - "license": "(Apache-2.0 OR MPL-1.1)" + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true }, "node_modules/has": { "version": "1.0.3", + "dev": true, "license": "MIT", "dependencies": { "function-bind": "^1.1.1" @@ -8880,6 +6959,7 @@ }, "node_modules/has-property-descriptors": { "version": "1.0.0", + "dev": true, "license": "MIT", "dependencies": { "get-intrinsic": 
"^1.1.1" @@ -8890,6 +6970,7 @@ }, "node_modules/has-symbols": { "version": "1.0.3", + "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -8900,6 +6981,7 @@ }, "node_modules/has-tostringtag": { "version": "1.0.0", + "dev": true, "license": "MIT", "dependencies": { "has-symbols": "^1.0.2" @@ -8911,374 +6993,44 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/he": { - "version": "1.2.0", - "dev": true, - "license": "MIT", - "bin": { - "he": "bin/he" - } - }, - "node_modules/history": { - "version": "4.10.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "license": "BSD-3-Clause", - "dependencies": { - "react-is": "^16.7.0" - } - }, - "node_modules/hoist-non-react-statics/node_modules/react-is": { - "version": "16.13.1", - "license": "MIT" - }, - "node_modules/hoopy": { - "version": "0.1.4", - "dev": true, - "license": "MIT", + "node_modules/highlight.js": { + "version": "11.9.0", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.9.0.tgz", + "integrity": "sha512-fJ7cW7fQGCYAkgv4CPfwFHrfd/cLS4Hau96JuJ+ZTOWhjnhoeN1ub1tFmALm/+lW5z4WCAuAV9bm05AP0mS6Gw==", "engines": { - "node": ">= 6.0.0" + "node": ">=12.0.0" } }, - "node_modules/hpack.js": { - "version": "2.1.6", - "dev": true, - "license": "MIT", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hpack.js/node_modules/isarray": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/hpack.js/node_modules/readable-stream": { - "version": "2.3.7", - "dev": true, - "license": "MIT", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - 
"safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/hpack.js/node_modules/string_decoder": { - "version": "1.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/html-element-map": { - "version": "1.3.1", - "dev": true, - "license": "MIT", - "dependencies": { - "array.prototype.filter": "^1.0.0", - "call-bind": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/html-encoding-sniffer": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "whatwg-encoding": "^1.0.5" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/html-entities": { - "version": "2.3.3", - "dev": true, - "license": "MIT" - }, "node_modules/html-escaper": { "version": "2.0.2", "dev": true, - "license": "MIT" - }, - "node_modules/html-minifier-terser": { - "version": "6.1.0", - "dev": true, "license": "MIT", - "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "^5.2.2", - "commander": "^8.3.0", - "he": "^1.2.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.10.0" - }, - "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/html-webpack-plugin": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/html-minifier-terser": "^6.0.0", - "html-minifier-terser": "^6.0.2", - "lodash": "^4.17.21", - "pretty-error": "^4.0.0", - "tapable": "^2.0.0" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/html-webpack-plugin" - }, - "peerDependencies": { - "webpack": "^5.20.0" - } - }, - "node_modules/htmlparser2": { - "version": "6.1.0", - "dev": true, - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": 
{ - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/htmlparser2/node_modules/dom-serializer": { - "version": "1.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/htmlparser2/node_modules/domhandler": { - "version": "4.3.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/htmlparser2/node_modules/domutils": { - "version": "2.8.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/htmlparser2/node_modules/entities": { - "version": "2.2.0", - "dev": true, - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "dev": true, - "license": "MIT" - }, - "node_modules/http-errors": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/http-parser-js": { - "version": "0.5.8", - "dev": true, - "license": "MIT" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "license": "MIT", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/http-proxy-agent": { - "version": "4.0.1", - "dev": true, - 
"license": "MIT", - "dependencies": { - "@tootallnate/once": "1", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/http-proxy-middleware": { - "version": "2.0.6", - "license": "MIT", - "dependencies": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@types/express": "^4.17.13" - }, - "peerDependenciesMeta": { - "@types/express": { - "optional": true - } - } - }, - "node_modules/https-proxy-agent": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } + "peer": true }, "node_modules/human-signals": { "version": "2.1.0", "dev": true, "license": "Apache-2.0", + "peer": true, "engines": { "node": ">=10.17.0" } }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-utils": { - "version": "5.1.0", - "dev": true, - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/idb": { - "version": "7.1.0", - "dev": true, - "license": "ISC" - }, - "node_modules/identity-obj-proxy": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "harmony-reflect": "^1.4.6" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/ignore": { - "version": "5.2.0", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "dev": true, - "license": "MIT", "engines": { "node": ">= 4" } }, - "node_modules/immer": { - "version": "9.0.15", - "dev": true, - "license": "MIT", 
- "funding": { - "type": "opencollective", - "url": "https://opencollective.com/immer" - } - }, "node_modules/immutable": { "version": "4.1.0", - "license": "MIT" + "dev": true, + "license": "MIT", + "optional": true, + "peer": true }, "node_modules/import-fresh": { "version": "3.3.0", @@ -9299,6 +7051,7 @@ "version": "3.1.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" @@ -9335,11 +7088,6 @@ "dev": true, "license": "ISC" }, - "node_modules/ini": { - "version": "1.3.8", - "dev": true, - "license": "ISC" - }, "node_modules/internal-slot": { "version": "1.0.3", "dev": true, @@ -9353,26 +7101,12 @@ "node": ">= 0.4" } }, - "node_modules/ipaddr.js": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/is-arguments": { - "version": "1.1.1", - "license": "MIT", + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "loose-envify": "^1.0.0" } }, "node_modules/is-arrayish": { @@ -9393,7 +7127,10 @@ }, "node_modules/is-binary-path": { "version": "2.1.0", + "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "binary-extensions": "^2.0.0" }, @@ -9416,6 +7153,21 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-builtin-module": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/is-builtin-module/-/is-builtin-module-3.2.1.tgz", + "integrity": "sha512-BSLE3HnV2syZ0FK0iMA/yUGplUeMmNz4AW5fnTunbCIqZi4vG3WjJT9FHMy5D69xmAYBHXQhJdALdpwVxV501A==", + "dev": true, + "dependencies": { + "builtin-modules": "^3.3.0" + }, + "engines": { 
+ "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-callable": { "version": "1.2.7", "dev": true, @@ -9440,6 +7192,7 @@ }, "node_modules/is-date-object": { "version": "1.0.5", + "dev": true, "license": "MIT", "dependencies": { "has-tostringtag": "^1.0.0" @@ -9451,22 +7204,9 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-docker": { - "version": "2.2.1", - "dev": true, - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-extglob": { "version": "2.1.1", + "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" @@ -9476,6 +7216,7 @@ "version": "3.0.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -9484,12 +7225,14 @@ "version": "2.1.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } }, "node_modules/is-glob": { "version": "4.0.3", + "dev": true, "license": "MIT", "dependencies": { "is-extglob": "^2.1.1" @@ -9516,6 +7259,7 @@ }, "node_modules/is-number": { "version": "7.0.0", + "dev": true, "license": "MIT", "engines": { "node": ">=0.12.0" @@ -9535,38 +7279,18 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-obj": { - "version": "1.0.1", + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", "dev": true, - "license": "MIT", "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, - "node_modules/is-plain-obj": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-plain-object": { - "version": "5.0.0", - "license": "MIT", - "engines": { - "node": 
">=0.10.0" - } - }, - "node_modules/is-potential-custom-element-name": { - "version": "1.0.1", - "dev": true, - "license": "MIT" - }, "node_modules/is-regex": { "version": "1.1.4", + "dev": true, "license": "MIT", "dependencies": { "call-bind": "^1.0.2", @@ -9579,22 +7303,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-regexp": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-root": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/is-shared-array-buffer": { "version": "1.0.2", "dev": true, @@ -9610,6 +7318,7 @@ "version": "2.0.1", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" }, @@ -9631,11 +7340,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-subset": { - "version": "0.1.1", - "dev": true, - "license": "MIT" - }, "node_modules/is-symbol": { "version": "1.0.4", "dev": true, @@ -9650,11 +7354,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, "node_modules/is-weakref": { "version": "1.0.2", "dev": true, @@ -9666,21 +7365,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-wsl": { - "version": "2.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/isarray": { - "version": "0.0.1", - "license": "MIT" - }, "node_modules/isexe": { "version": "2.0.0", "dev": true, @@ -9699,6 +7383,7 @@ "version": "3.2.0", "dev": true, "license": "BSD-3-Clause", + "peer": true, "engines": { "node": ">=8" } @@ -9707,6 +7392,7 @@ "version": "5.2.1", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "@babel/core": "^7.12.3", "@babel/parser": "^7.14.7", @@ -9722,6 +7408,7 @@ "version": "3.0.0", "dev": true, "license": "BSD-3-Clause", + "peer": 
true, "dependencies": { "istanbul-lib-coverage": "^3.0.0", "make-dir": "^3.0.0", @@ -9735,6 +7422,7 @@ "version": "4.0.1", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "debug": "^4.1.1", "istanbul-lib-coverage": "^3.0.0", @@ -9748,6 +7436,7 @@ "version": "3.1.5", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" @@ -9756,23 +7445,6 @@ "node": ">=8" } }, - "node_modules/jake": { - "version": "10.8.5", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "async": "^3.2.3", - "chalk": "^4.0.2", - "filelist": "^1.0.1", - "minimatch": "^3.0.4" - }, - "bin": { - "jake": "bin/cli.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/jest": { "version": "29.2.2", "dev": true, @@ -10285,20 +7957,6 @@ "url": "https://github.com/chalk/supports-color?sponsor=1" } }, - "node_modules/jest-diff": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "chalk": "^4.0.0", - "diff-sequences": "^27.5.1", - "jest-get-type": "^27.5.1", - "pretty-format": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, "node_modules/jest-docblock": { "version": "29.2.0", "dev": true, @@ -10385,115 +8043,6 @@ "license": "MIT", "peer": true }, - "node_modules/jest-environment-jsdom": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/fake-timers": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "jest-mock": "^27.5.1", - "jest-util": "^27.5.1", - "jsdom": "^16.6.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-environment-jsdom/node_modules/@jest/environment": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/fake-timers": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "jest-mock": "^27.5.1" - }, - 
"engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-environment-jsdom/node_modules/@jest/fake-timers": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@sinonjs/fake-timers": "^8.0.1", - "@types/node": "*", - "jest-message-util": "^27.5.1", - "jest-mock": "^27.5.1", - "jest-util": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-environment-jsdom/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-environment-jsdom/node_modules/@sinonjs/fake-timers": { - "version": "8.1.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^1.7.0" - } - }, - "node_modules/jest-environment-jsdom/node_modules/@types/yargs": { - "version": "16.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/jest-environment-jsdom/node_modules/jest-message-util": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^27.5.1", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^27.5.1", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-environment-jsdom/node_modules/jest-mock": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@types/node": "*" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || 
^14.15.0 || >=15.0.0" - } - }, "node_modules/jest-environment-node": { "version": "29.2.2", "dev": true, @@ -10541,6 +8090,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" } @@ -10549,6 +8100,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@jest/types": "^27.5.1", "@types/graceful-fs": "^4.1.2", @@ -10574,6 +8127,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", @@ -10589,328 +8144,12 @@ "version": "16.0.4", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/yargs-parser": "*" } }, - "node_modules/jest-jasmine2": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/source-map": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "chalk": "^4.0.0", - "co": "^4.6.0", - "expect": "^27.5.1", - "is-generator-fn": "^2.0.0", - "jest-each": "^27.5.1", - "jest-matcher-utils": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-runtime": "^27.5.1", - "jest-snapshot": "^27.5.1", - "jest-util": "^27.5.1", - "pretty-format": "^27.5.1", - "throat": "^6.0.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/console": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@types/node": "*", - "chalk": "^4.0.0", - "jest-message-util": "^27.5.1", - "jest-util": "^27.5.1", - "slash": "^3.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/environment": { - "version": "27.5.1", - "dev": true, - 
"license": "MIT", - "dependencies": { - "@jest/fake-timers": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "jest-mock": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/fake-timers": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@sinonjs/fake-timers": "^8.0.1", - "@types/node": "*", - "jest-message-util": "^27.5.1", - "jest-mock": "^27.5.1", - "jest-util": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/globals": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/types": "^27.5.1", - "expect": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/source-map": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "callsites": "^3.0.0", - "graceful-fs": "^4.2.9", - "source-map": "^0.6.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/test-result": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/console": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/istanbul-lib-coverage": "^2.0.0", - "collect-v8-coverage": "^1.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/transform": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.1.0", - "@jest/types": "^27.5.1", - "babel-plugin-istanbul": "^6.1.1", - "chalk": "^4.0.0", - "convert-source-map": "^1.4.0", - "fast-json-stable-stringify": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^27.5.1", 
- "jest-regex-util": "^27.5.1", - "jest-util": "^27.5.1", - "micromatch": "^4.0.4", - "pirates": "^4.0.4", - "slash": "^3.0.0", - "source-map": "^0.6.1", - "write-file-atomic": "^3.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@sinonjs/fake-timers": { - "version": "8.1.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^1.7.0" - } - }, - "node_modules/jest-jasmine2/node_modules/@types/yargs": { - "version": "16.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/jest-jasmine2/node_modules/expect": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "jest-get-type": "^27.5.1", - "jest-matcher-utils": "^27.5.1", - "jest-message-util": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/jest-each": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "chalk": "^4.0.0", - "jest-get-type": "^27.5.1", - "jest-util": "^27.5.1", - "pretty-format": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/jest-message-util": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^27.5.1", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": 
"^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^27.5.1", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/jest-mock": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@types/node": "*" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/jest-runtime": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/fake-timers": "^27.5.1", - "@jest/globals": "^27.5.1", - "@jest/source-map": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "chalk": "^4.0.0", - "cjs-module-lexer": "^1.0.0", - "collect-v8-coverage": "^1.0.0", - "execa": "^5.0.0", - "glob": "^7.1.3", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-mock": "^27.5.1", - "jest-regex-util": "^27.5.1", - "jest-resolve": "^27.5.1", - "jest-snapshot": "^27.5.1", - "jest-util": "^27.5.1", - "slash": "^3.0.0", - "strip-bom": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/jest-snapshot": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.7.2", - "@babel/generator": "^7.7.2", - "@babel/plugin-syntax-typescript": "^7.7.2", - "@babel/traverse": "^7.7.2", - "@babel/types": "^7.0.0", - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/babel__traverse": "^7.0.4", - "@types/prettier": "^2.1.5", - "babel-preset-current-node-syntax": "^1.0.0", - "chalk": "^4.0.0", - "expect": "^27.5.1", - "graceful-fs": "^4.2.9", - "jest-diff": "^27.5.1", - "jest-get-type": "^27.5.1", - "jest-haste-map": "^27.5.1", - "jest-matcher-utils": 
"^27.5.1", - "jest-message-util": "^27.5.1", - "jest-util": "^27.5.1", - "natural-compare": "^1.4.0", - "pretty-format": "^27.5.1", - "semver": "^7.3.2" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-jasmine2/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/jest-jasmine2/node_modules/write-file-atomic": { - "version": "3.0.3", - "dev": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, "node_modules/jest-leak-detector": { "version": "29.2.1", "dev": true, @@ -10965,20 +8204,6 @@ "license": "MIT", "peer": true }, - "node_modules/jest-matcher-utils": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "chalk": "^4.0.0", - "jest-diff": "^27.5.1", - "jest-get-type": "^27.5.1", - "pretty-format": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, "node_modules/jest-message-util": { "version": "29.2.1", "dev": true, @@ -11062,6 +8287,7 @@ "version": "1.2.2", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" }, @@ -11078,6 +8304,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" } @@ -11086,6 +8314,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@jest/types": "^27.5.1", "chalk": "^4.0.0", @@ -11128,6 +8358,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", @@ -11143,6 +8375,8 @@ "version": "16.0.4", "dev": 
true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/yargs-parser": "*" } @@ -11568,6 +8802,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/node": "*", "graceful-fs": "^4.2.9" @@ -11791,6 +9027,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@jest/types": "^27.5.1", "@types/node": "*", @@ -11807,6 +9045,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", @@ -11822,6 +9062,8 @@ "version": "16.0.4", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/yargs-parser": "*" } @@ -11830,6 +9072,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@jest/types": "^27.5.1", "camelcase": "^6.2.0", @@ -11846,6 +9090,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", @@ -11861,6 +9107,8 @@ "version": "16.0.4", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/yargs-parser": "*" } @@ -11869,6 +9117,8 @@ "version": "6.3.0", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">=10" }, @@ -11916,6 +9166,8 @@ "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/node": "*", "merge-stream": "^2.0.0", @@ -11929,6 +9181,8 @@ "version": "8.1.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "has-flag": "^4.0.0" }, @@ -11939,27 +9193,15 @@ "url": "https://github.com/chalk/supports-color?sponsor=1" } }, - "node_modules/jquery": { - "version": "3.7.1", - "license": "MIT" - }, - 
"node_modules/jquery.flot.tooltip": { - "version": "0.9.0", - "license": "MIT" - }, - "node_modules/js-sdsl": { - "version": "4.1.5", - "dev": true, - "license": "MIT" - }, "node_modules/js-tokens": { "version": "4.0.0", "license": "MIT" }, "node_modules/js-yaml": { "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", "dev": true, - "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, @@ -11967,56 +9209,6 @@ "js-yaml": "bin/js-yaml.js" } }, - "node_modules/jsdom": { - "version": "16.7.0", - "dev": true, - "license": "MIT", - "dependencies": { - "abab": "^2.0.5", - "acorn": "^8.2.4", - "acorn-globals": "^6.0.0", - "cssom": "^0.4.4", - "cssstyle": "^2.3.0", - "data-urls": "^2.0.0", - "decimal.js": "^10.2.1", - "domexception": "^2.0.1", - "escodegen": "^2.0.0", - "form-data": "^3.0.0", - "html-encoding-sniffer": "^2.0.1", - "http-proxy-agent": "^4.0.1", - "https-proxy-agent": "^5.0.0", - "is-potential-custom-element-name": "^1.0.1", - "nwsapi": "^2.2.0", - "parse5": "6.0.1", - "saxes": "^5.0.1", - "symbol-tree": "^3.2.4", - "tough-cookie": "^4.0.0", - "w3c-hr-time": "^1.0.2", - "w3c-xmlserializer": "^2.0.0", - "webidl-conversions": "^6.1.0", - "whatwg-encoding": "^1.0.5", - "whatwg-mimetype": "^2.3.0", - "whatwg-url": "^8.5.0", - "ws": "^7.4.6", - "xml-name-validator": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "canvas": "^2.5.0" - }, - "peerDependenciesMeta": { - "canvas": { - "optional": true - } - } - }, - "node_modules/jsdom/node_modules/parse5": { - "version": "6.0.1", - "dev": true, - "license": "MIT" - }, "node_modules/jsesc": { "version": "2.5.2", "dev": true, @@ -12033,11 +9225,6 @@ "dev": true, "license": "MIT" }, - "node_modules/json-schema": { - "version": "0.4.0", - "dev": true, - "license": "(AFL-2.1 OR BSD-3-Clause)" - }, "node_modules/json-schema-traverse": { 
"version": "0.4.1", "dev": true, @@ -12064,25 +9251,6 @@ "node": ">=6" } }, - "node_modules/jsonfile": { - "version": "6.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonpointer": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/jsx-ast-utils": { "version": "3.3.3", "dev": true, @@ -12095,35 +9263,15 @@ "node": ">=4.0" } }, - "node_modules/just-extend": { - "version": "4.2.1", - "dev": true, - "license": "MIT" - }, - "node_modules/kind-of": { - "version": "6.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/kleur": { "version": "3.0.3", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } }, - "node_modules/klona": { - "version": "2.0.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, "node_modules/language-subtag-registry": { "version": "0.3.22", "dev": true, @@ -12141,14 +9289,16 @@ "version": "3.1.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } }, "node_modules/levn": { "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dev": true, - "license": "MIT", "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" @@ -12157,40 +9307,11 @@ "node": ">= 0.8.0" } }, - "node_modules/lilconfig": { - "version": "2.0.6", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - } - }, "node_modules/lines-and-columns": { "version": "1.2.4", "dev": true, "license": "MIT" }, - "node_modules/loader-runner": { - "version": "4.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/loader-utils": { - "version": "2.0.4", - "dev": true, - "license": 
"MIT", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" - }, - "engines": { - "node": ">=8.9.0" - } - }, "node_modules/locate-path": { "version": "6.0.0", "dev": true, @@ -12207,6 +9328,7 @@ }, "node_modules/lodash": { "version": "4.17.21", + "dev": true, "license": "MIT" }, "node_modules/lodash.debounce": { @@ -12214,26 +9336,6 @@ "dev": true, "license": "MIT" }, - "node_modules/lodash.escape": { - "version": "4.0.1", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.flattendeep": { - "version": "4.4.0", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.get": { - "version": "4.4.2", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.isequal": { - "version": "4.5.0", - "dev": true, - "license": "MIT" - }, "node_modules/lodash.memoize": { "version": "4.1.2", "dev": true, @@ -12244,16 +9346,6 @@ "dev": true, "license": "MIT" }, - "node_modules/lodash.sortby": { - "version": "4.7.0", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "dev": true, - "license": "MIT" - }, "node_modules/loose-envify": { "version": "1.4.0", "license": "MIT", @@ -12264,14 +9356,6 @@ "loose-envify": "cli.js" } }, - "node_modules/lower-case": { - "version": "2.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "tslib": "^2.0.3" - } - }, "node_modules/lru-cache": { "version": "6.0.0", "dev": true, @@ -12283,18 +9367,11 @@ "node": ">=10" } }, - "node_modules/magic-string": { - "version": "0.25.9", - "dev": true, - "license": "MIT", - "dependencies": { - "sourcemap-codec": "^1.4.8" - } - }, "node_modules/make-dir": { "version": "3.1.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "semver": "^6.0.0" }, @@ -12314,43 +9391,16 @@ "version": "1.0.12", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "tmpl": "1.0.5" } }, - "node_modules/mdn-data": { - "version": "2.0.4", - "dev": true, - "license": "CC0-1.0" - }, - 
"node_modules/media-typer": { - "version": "0.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memfs": { - "version": "3.4.7", - "dev": true, - "license": "Unlicense", - "dependencies": { - "fs-monkey": "^1.0.3" - }, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "dev": true, - "license": "MIT" - }, "node_modules/merge-stream": { "version": "2.0.0", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/merge2": { "version": "1.4.1", @@ -12360,16 +9410,9 @@ "node": ">= 8" } }, - "node_modules/methods": { - "version": "1.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, "node_modules/micromatch": { "version": "4.0.5", + "dev": true, "license": "MIT", "dependencies": { "braces": "^3.0.2", @@ -12379,116 +9422,15 @@ "node": ">=8.6" } }, - "node_modules/mime": { - "version": "1.6.0", - "dev": true, - "license": "MIT", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "dev": true, - "license": "MIT", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, "node_modules/mimic-fn": { "version": "2.1.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } }, - "node_modules/mini-css-extract-plugin": { - "version": "2.6.1", - "dev": true, - "license": "MIT", - "dependencies": { - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - 
"fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "dev": true, - "license": "ISC" - }, "node_modules/minimatch": { "version": "3.1.2", "dev": true, @@ -12505,39 +9447,6 @@ "dev": true, "license": "MIT" }, - "node_modules/mkdirp": { - "version": "0.5.6", - "dev": true, - "license": "MIT", - "dependencies": { - "minimist": "^1.2.6" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/moment": { - "version": "2.29.4", - "license": "MIT", - "engines": { - "node": "*" - } - }, - "node_modules/moment-timezone": { - "version": "0.5.43", - "license": "MIT", - "dependencies": { - "moment": "^2.29.4" - }, - "engines": { - "node": "*" - } - }, - "node_modules/moo": { - "version": "0.5.2", - "dev": true, - "license": "BSD-3-Clause" - }, "node_modules/moo-color": { "version": "1.0.3", "dev": true, @@ -12551,26 +9460,17 @@ "dev": true, "license": "MIT" }, - "node_modules/multicast-dns": { - "version": "7.2.5", - "dev": true, - "license": "MIT", - "dependencies": { - 
"dns-packet": "^5.2.2", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/mutationobserver-shim": { - "version": "0.3.7", - "dev": true, - "license": "MIT" - }, "node_modules/nanoid": { - "version": "3.3.4", - "license": "MIT", + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -12583,82 +9483,6 @@ "dev": true, "license": "MIT" }, - "node_modules/nearley": { - "version": "2.20.1", - "dev": true, - "license": "MIT", - "dependencies": { - "commander": "^2.19.0", - "moo": "^0.5.0", - "railroad-diagrams": "^1.0.0", - "randexp": "0.4.6" - }, - "bin": { - "nearley-railroad": "bin/nearley-railroad.js", - "nearley-test": "bin/nearley-test.js", - "nearley-unparse": "bin/nearley-unparse.js", - "nearleyc": "bin/nearleyc.js" - }, - "funding": { - "type": "individual", - "url": "https://nearley.js.org/#give-to-nearley" - } - }, - "node_modules/nearley/node_modules/commander": { - "version": "2.20.3", - "dev": true, - "license": "MIT" - }, - "node_modules/negotiator": { - "version": "0.6.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "dev": true, - "license": "MIT" - }, - "node_modules/nise": { - "version": "5.1.4", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^2.0.0", - "@sinonjs/fake-timers": "^10.0.2", - "@sinonjs/text-encoding": "^0.7.1", - "just-extend": "^4.0.2", - "path-to-regexp": "^1.7.0" - } - }, - "node_modules/nise/node_modules/@sinonjs/commons": { - "version": "2.0.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "type-detect": "4.0.8" - } - }, - 
"node_modules/nise/node_modules/@sinonjs/fake-timers": { - "version": "10.0.2", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^2.0.0" - } - }, - "node_modules/no-case": { - "version": "3.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, "node_modules/nock": { "version": "13.4.0", "dev": true, @@ -12710,54 +9534,32 @@ "webidl-conversions": "^3.0.0" } }, - "node_modules/node-forge": { - "version": "1.3.1", - "dev": true, - "license": "(BSD-3-Clause OR GPL-2.0)", - "engines": { - "node": ">= 6.13.0" - } - }, "node_modules/node-int64": { "version": "0.4.0", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/node-releases": { - "version": "2.0.6", - "dev": true, - "license": "MIT" + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", + "dev": true }, "node_modules/normalize-path": { "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } }, - "node_modules/normalize-url": { - "version": "6.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/npm-run-path": { "version": "4.0.1", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "path-key": "^3.0.0" }, @@ -12765,22 +9567,6 @@ "node": ">=8" } }, - "node_modules/nth-check": { - "version": "2.1.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, - "node_modules/nwsapi": { - "version": "2.2.2", - "dev": true, - 
"license": "MIT" - }, "node_modules/object-assign": { "version": "4.1.1", "license": "MIT", @@ -12788,14 +9574,6 @@ "node": ">=0.10.0" } }, - "node_modules/object-hash": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, "node_modules/object-inspect": { "version": "1.12.2", "dev": true, @@ -12804,22 +9582,9 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object-is": { - "version": "1.1.5", - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/object-keys": { "version": "1.1.1", + "dev": true, "license": "MIT", "engines": { "node": ">= 0.4" @@ -12871,23 +9636,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/object.getownpropertydescriptors": { - "version": "2.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "array.prototype.reduce": "^1.0.4", - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.1" - }, - "engines": { - "node": ">= 0.8" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/object.hasown": { "version": "1.1.1", "dev": true, @@ -12916,30 +9664,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/obuf": { - "version": "1.1.2", - "dev": true, - "license": "MIT" - }, - "node_modules/on-finished": { - "version": "2.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, "node_modules/once": { "version": "1.4.0", "dev": true, @@ -12952,6 +9676,7 @@ "version": "5.1.2", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "mimic-fn": "^2.1.0" }, @@ -12962,33 +9687,18 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, - "node_modules/open": { - "version": "8.4.0", - "dev": true, - "license": "MIT", - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/optionator": { - "version": "0.9.1", + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", "dev": true, - "license": "MIT", "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" + "type-check": "^0.4.0" }, "engines": { "node": ">= 0.8.0" @@ -13022,35 +9732,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/p-retry": { - "version": "4.6.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/p-try": { "version": "2.2.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } }, - "node_modules/param-case": { - "version": "3.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "dot-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, "node_modules/parent-module": { "version": "1.0.1", "dev": true, @@ -13079,50 +9769,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/parse-srcset": { - "version": "1.0.2", - "license": "MIT" - }, - "node_modules/parse5": { - "version": "7.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "entities": "^4.4.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "7.0.0", - "dev": true, - 
"license": "MIT", - "dependencies": { - "domhandler": "^5.0.2", - "parse5": "^7.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascal-case": { - "version": "3.1.2", - "dev": true, - "license": "MIT", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" - } - }, "node_modules/path-exists": { "version": "4.0.0", "dev": true, @@ -13152,13 +9798,6 @@ "dev": true, "license": "MIT" }, - "node_modules/path-to-regexp": { - "version": "1.8.0", - "license": "MIT", - "dependencies": { - "isarray": "0.0.1" - } - }, "node_modules/path-type": { "version": "4.0.0", "dev": true, @@ -13167,17 +9806,14 @@ "node": ">=8" } }, - "node_modules/performance-now": { - "version": "2.1.0", - "dev": true, - "license": "MIT" - }, "node_modules/picocolors": { "version": "1.0.0", + "dev": true, "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", + "dev": true, "license": "MIT", "engines": { "node": ">=8.6" @@ -13186,18 +9822,11 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, - "node_modules/pify": { - "version": "2.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/pirates": { "version": "4.0.5", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">= 6" } @@ -13206,6 +9835,7 @@ "version": "4.2.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "find-up": "^4.0.0" }, @@ -13217,6 +9847,7 @@ "version": "4.1.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -13229,6 +9860,7 @@ "version": "5.0.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-locate": "^4.1.0" }, @@ -13240,6 +9872,7 @@ "version": "2.3.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-try": "^2.0.0" }, @@ -13254,6 
+9887,7 @@ "version": "4.1.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-limit": "^2.2.0" }, @@ -13261,83 +9895,11 @@ "node": ">=8" } }, - "node_modules/pkg-up": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-up/node_modules/find-up": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/locate-path": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/p-limit": { - "version": "2.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-up/node_modules/p-locate": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/pkg-up/node_modules/path-exists": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/popper.js": { - "version": "1.16.1", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/popperjs" - } - }, "node_modules/postcss": { - "version": "8.4.17", + "version": "8.4.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.35.tgz", + "integrity": "sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==", + "dev": true, "funding": [ { "type": "opencollective", @@ -13346,11 +9908,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": 
"https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.7", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -13358,398 +9923,6 @@ "node": "^10 || ^12 || >=14" } }, - "node_modules/postcss-attribute-case-insensitive": { - "version": "5.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.10" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-browser-comments": { - "version": "4.0.0", - "dev": true, - "license": "CC0-1.0", - "engines": { - "node": ">=8" - }, - "peerDependencies": { - "browserslist": ">=4", - "postcss": ">=8" - } - }, - "node_modules/postcss-calc": { - "version": "8.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.9", - "postcss-value-parser": "^4.2.0" - }, - "peerDependencies": { - "postcss": "^8.2.2" - } - }, - "node_modules/postcss-clamp": { - "version": "4.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": ">=7.6.0" - }, - "peerDependencies": { - "postcss": "^8.4.6" - } - }, - "node_modules/postcss-color-functional-notation": { - "version": "4.2.4", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-color-hex-alpha": { - "version": "8.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - 
"peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-color-rebeccapurple": { - "version": "7.1.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-colormin": { - "version": "5.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0", - "colord": "^2.9.1", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-convert-values": { - "version": "5.1.2", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.20.3", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-custom-media": { - "version": "8.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.3" - } - }, - "node_modules/postcss-custom-properties": { - "version": "12.1.9", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-custom-selectors": { - "version": "6.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - 
"funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.3" - } - }, - "node_modules/postcss-dir-pseudo-class": { - "version": "6.0.5", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-selector-parser": "^6.0.10" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-discard-comments": { - "version": "5.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-empty": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-discard-overridden": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-double-position-gradients": { - "version": "3.1.2", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-env-function": { - "version": "4.0.6", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || 
>=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-flexbugs-fixes": { - "version": "5.0.2", - "dev": true, - "license": "MIT", - "peerDependencies": { - "postcss": "^8.1.4" - } - }, - "node_modules/postcss-focus-visible": { - "version": "6.0.4", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-focus-within": { - "version": "5.0.4", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-selector-parser": "^6.0.9" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-font-variant": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-gap-properties": { - "version": "3.0.5", - "dev": true, - "license": "CC0-1.0", - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-image-set-function": { - "version": "4.0.7", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-import": { - "version": "14.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - }, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.0.0" - } - }, - "node_modules/postcss-initial": { - "version": "4.0.1", - "dev": true, - "license": "MIT", - "peerDependencies": { - 
"postcss": "^8.0.0" - } - }, "node_modules/postcss-js": { "version": "4.0.0", "dev": true, @@ -13768,263 +9941,48 @@ "postcss": "^8.3.3" } }, - "node_modules/postcss-lab-function": { - "version": "4.2.1", + "node_modules/postcss-mixins": { + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/postcss-mixins/-/postcss-mixins-9.0.4.tgz", + "integrity": "sha512-XVq5jwQJDRu5M1XGkdpgASqLk37OqkH4JCFDXl/Dn7janOJjCTEKL+36cnRVy7bMtoBzALfO7bV7nTIsFnUWLA==", "dev": true, - "license": "CC0-1.0", "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^1.1.0", - "postcss-value-parser": "^4.2.0" + "fast-glob": "^3.2.11", + "postcss-js": "^4.0.0", + "postcss-simple-vars": "^7.0.0", + "sugarss": "^4.0.1" }, "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-load-config": { - "version": "3.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "lilconfig": "^2.0.5", - "yaml": "^1.10.2" - }, - "engines": { - "node": ">= 10" + "node": ">=14.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/postcss/" }, "peerDependencies": { - "postcss": ">=8.0.9", - "ts-node": ">=9.0.0" - }, - "peerDependenciesMeta": { - "postcss": { - "optional": true - }, - "ts-node": { - "optional": true - } + "postcss": "^8.2.14" } }, - "node_modules/postcss-loader": { - "version": "6.2.1", + "node_modules/postcss-preset-mantine": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/postcss-preset-mantine/-/postcss-preset-mantine-1.13.0.tgz", + "integrity": "sha512-1bv/mQz2K+/FixIMxYd83BYH7PusDZaI7LpUtKbb1l/5N5w6t1p/V9ONHfRJeeAZyfa6Xc+AtR+95VKdFXRH1g==", "dev": true, - "license": "MIT", "dependencies": { - "cosmiconfig": "^7.0.0", - "klona": "^2.0.5", - "semver": "^7.3.5" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/webpack" + "postcss-mixins": "^9.0.4", + "postcss-nested": "^6.0.1" }, "peerDependencies": { - "postcss": "^7.0.0 || ^8.0.1", - "webpack": "^5.0.0" + "postcss": ">=8.0.0" } }, - "node_modules/postcss-loader/node_modules/semver": { - "version": "7.5.4", + "node_modules/postcss-preset-mantine/node_modules/postcss-nested": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", + "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", "dev": true, - "license": "ISC", "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/postcss-logical": { - "version": "5.0.4", - "dev": true, - "license": "CC0-1.0", - "engines": { - "node": "^12 || ^14 || >=16" - }, - "peerDependencies": { - "postcss": "^8.4" - } - }, - "node_modules/postcss-media-minmax": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "5.1.6", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "stylehacks": "^5.1.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-merge-rules": { - "version": "5.1.2", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0", - "cssnano-utils": "^3.1.0", - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, 
- "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-gradients": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "colord": "^2.9.1", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-params": { - "version": "5.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.16.6", - "cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-minify-selectors": { - "version": "5.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "3.0.0", - "dev": true, - "license": "ISC", - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "icss-utils": "^5.0.0", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.1.0" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-scope": { - "version": "3.0.0", - "dev": true, - "license": "ISC", - "dependencies": { - "postcss-selector-parser": "^6.0.4" - }, - "engines": { - "node": "^10 || ^12 || >= 14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-modules-values": { - "version": "4.0.0", - "dev": true, - "license": "ISC", - "dependencies": { - "icss-utils": "^5.0.0" - }, - "engines": { - "node": "^10 || ^12 || >= 
14" - }, - "peerDependencies": { - "postcss": "^8.1.0" - } - }, - "node_modules/postcss-nested": { - "version": "5.0.6", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.6" + "postcss-selector-parser": "^6.0.11" }, "engines": { "node": ">=12.0" @@ -14037,387 +9995,11 @@ "postcss": "^8.2.14" } }, - "node_modules/postcss-nesting": { - "version": "10.2.0", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/selector-specificity": "^2.0.0", - "postcss-selector-parser": "^6.0.10" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-normalize": { - "version": "10.0.1", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/normalize.css": "*", - "postcss-browser-comments": "^4", - "sanitize.css": "*" - }, - "engines": { - "node": ">= 12" - }, - "peerDependencies": { - "browserslist": ">= 4", - "postcss": ">= 8" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-positions": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": 
"^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-string": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-unicode": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.16.6", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-url": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "normalize-url": "^6.0.1", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-normalize-whitespace": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-opacity-percentage": { - "version": "1.1.2", - "dev": true, - "funding": [ - { - "type": "kofi", - "url": "https://ko-fi.com/mrcgrtz" - }, - { - "type": "liberapay", - "url": "https://liberapay.com/mrcgrtz" - } - ], - "license": "MIT", - "engines": { - "node": "^12 || ^14 || >=16" - } - }, - "node_modules/postcss-ordered-values": { - "version": "5.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - 
"cssnano-utils": "^3.1.0", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-overflow-shorthand": { - "version": "3.0.4", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-page-break": { - "version": "3.0.4", - "dev": true, - "license": "MIT", - "peerDependencies": { - "postcss": "^8" - } - }, - "node_modules/postcss-place": { - "version": "7.0.5", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-preset-env": { - "version": "7.8.2", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "@csstools/postcss-cascade-layers": "^1.1.0", - "@csstools/postcss-color-function": "^1.1.1", - "@csstools/postcss-font-format-keywords": "^1.0.1", - "@csstools/postcss-hwb-function": "^1.0.2", - "@csstools/postcss-ic-unit": "^1.0.1", - "@csstools/postcss-is-pseudo-class": "^2.0.7", - "@csstools/postcss-nested-calc": "^1.0.0", - "@csstools/postcss-normalize-display-values": "^1.0.1", - "@csstools/postcss-oklab-function": "^1.1.1", - "@csstools/postcss-progressive-custom-properties": "^1.3.0", - "@csstools/postcss-stepped-value-functions": "^1.0.1", - "@csstools/postcss-text-decoration-shorthand": "^1.0.0", - "@csstools/postcss-trigonometric-functions": "^1.0.2", - "@csstools/postcss-unset-value": "^1.0.2", - "autoprefixer": "^10.4.11", - "browserslist": "^4.21.3", - "css-blank-pseudo": "^3.0.3", - "css-has-pseudo": 
"^3.0.4", - "css-prefers-color-scheme": "^6.0.3", - "cssdb": "^7.0.1", - "postcss-attribute-case-insensitive": "^5.0.2", - "postcss-clamp": "^4.1.0", - "postcss-color-functional-notation": "^4.2.4", - "postcss-color-hex-alpha": "^8.0.4", - "postcss-color-rebeccapurple": "^7.1.1", - "postcss-custom-media": "^8.0.2", - "postcss-custom-properties": "^12.1.9", - "postcss-custom-selectors": "^6.0.3", - "postcss-dir-pseudo-class": "^6.0.5", - "postcss-double-position-gradients": "^3.1.2", - "postcss-env-function": "^4.0.6", - "postcss-focus-visible": "^6.0.4", - "postcss-focus-within": "^5.0.4", - "postcss-font-variant": "^5.0.0", - "postcss-gap-properties": "^3.0.5", - "postcss-image-set-function": "^4.0.7", - "postcss-initial": "^4.0.1", - "postcss-lab-function": "^4.2.1", - "postcss-logical": "^5.0.4", - "postcss-media-minmax": "^5.0.0", - "postcss-nesting": "^10.2.0", - "postcss-opacity-percentage": "^1.1.2", - "postcss-overflow-shorthand": "^3.0.4", - "postcss-page-break": "^3.0.4", - "postcss-place": "^7.0.5", - "postcss-pseudo-class-any-link": "^7.1.6", - "postcss-replace-overflow-wrap": "^4.0.0", - "postcss-selector-not": "^6.0.1", - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-pseudo-class-any-link": { - "version": "7.1.6", - "dev": true, - "license": "CC0-1.0", - "dependencies": { - "postcss-selector-parser": "^6.0.10" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, - "node_modules/postcss-reduce-initial": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.16.6", - "caniuse-api": "^3.0.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - 
"peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-replace-overflow-wrap": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "peerDependencies": { - "postcss": "^8.0.3" - } - }, - "node_modules/postcss-selector-not": { - "version": "6.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.10" - }, - "engines": { - "node": "^12 || ^14 || >=16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, - "peerDependencies": { - "postcss": "^8.2" - } - }, "node_modules/postcss-selector-parser": { - "version": "6.0.10", + "version": "6.0.15", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.15.tgz", + "integrity": "sha512-rEYkQOMUCEMhsKbK66tbEU9QVIxbhN18YiniAwA7XQYTVBqrBy+P2p5JcdqsHgKM2zWylp8d7J6eszocfds5Sw==", "dev": true, - "license": "MIT", "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -14426,152 +10008,27 @@ "node": ">=4" } }, - "node_modules/postcss-svgo": { - "version": "5.1.0", + "node_modules/postcss-simple-vars": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/postcss-simple-vars/-/postcss-simple-vars-7.0.1.tgz", + "integrity": "sha512-5GLLXaS8qmzHMOjVxqkk1TZPf1jMqesiI7qLhnlyERalG0sMbHIbJqrcnrpmZdKCLglHnRHoEBB61RtGTsj++A==", "dev": true, - "license": "MIT", - "dependencies": { - "postcss-value-parser": "^4.2.0", - "svgo": "^2.7.0" - }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.2.1" } }, - 
"node_modules/postcss-svgo/node_modules/commander": { - "version": "7.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10" - } - }, - "node_modules/postcss-svgo/node_modules/css-select": { - "version": "4.3.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/postcss-svgo/node_modules/css-tree": { - "version": "1.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/postcss-svgo/node_modules/dom-serializer": { - "version": "1.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/postcss-svgo/node_modules/domhandler": { - "version": "4.3.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/postcss-svgo/node_modules/domutils": { - "version": "2.8.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/postcss-svgo/node_modules/entities": { - "version": "2.2.0", - "dev": true, - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/postcss-svgo/node_modules/mdn-data": { - "version": "2.0.14", - "dev": true, - "license": "CC0-1.0" - }, - 
"node_modules/postcss-svgo/node_modules/svgo": { - "version": "2.8.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^4.1.3", - "css-tree": "^1.1.3", - "csso": "^4.2.0", - "picocolors": "^1.0.0", - "stable": "^0.1.8" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/postcss-unique-selectors": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "postcss-selector-parser": "^6.0.5" - }, - "engines": { - "node": "^10 || ^12 || >=14.0" - }, - "peerDependencies": { - "postcss": "^8.2.15" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.2.0", - "dev": true, - "license": "MIT" - }, "node_modules/prelude-ls": { "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.8.0" } @@ -14601,30 +10058,12 @@ "node": ">=6.0.0" } }, - "node_modules/pretty-bytes": { - "version": "5.6.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pretty-error": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^3.0.0" - } - }, "node_modules/pretty-format": { "version": "27.5.1", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", @@ -14638,6 +10077,8 @@ "version": "5.2.0", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">=10" }, @@ -14645,19 +10086,6 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "dev": true, - "license": "MIT" - 
}, - "node_modules/promise": { - "version": "8.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "asap": "~2.0.6" - } - }, "node_modules/promise-polyfill": { "version": "8.2.3", "dev": true, @@ -14667,6 +10095,7 @@ "version": "2.4.2", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" @@ -14696,31 +10125,6 @@ "node": ">= 8" } }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "dev": true, - "license": "MIT", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/proxy-addr/node_modules/ipaddr.js": { - "version": "1.9.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/psl": { - "version": "1.9.0", - "dev": true, - "license": "MIT" - }, "node_modules/punycode": { "version": "2.1.1", "dev": true, @@ -14729,34 +10133,6 @@ "node": ">=6" } }, - "node_modules/q": { - "version": "1.5.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.6.0", - "teleport": ">=0.2.0" - } - }, - "node_modules/qs": { - "version": "6.10.3", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "side-channel": "^1.0.4" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "dev": true, - "license": "MIT" - }, "node_modules/queue-microtask": { "version": "1.2.3", "dev": true, @@ -14776,94 +10152,10 @@ ], "license": "MIT" }, - "node_modules/quick-lru": { - "version": "5.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/raf": { - "version": "3.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "performance-now": "^2.1.0" - } - }, - "node_modules/railroad-diagrams": { - "version": "1.0.0", - "dev": true, - "license": "CC0-1.0" - 
}, - "node_modules/randexp": { - "version": "0.4.6", - "dev": true, - "license": "MIT", - "dependencies": { - "discontinuous-range": "1.0.0", - "ret": "~0.1.10" - }, - "engines": { - "node": ">=0.12" - } - }, - "node_modules/randombytes": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "2.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/iconv-lite": { - "version": "0.4.24", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/react": { "version": "17.0.2", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1" @@ -14872,78 +10164,10 @@ "node": ">=0.10.0" } }, - "node_modules/react-app-polyfill": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "core-js": "^3.19.2", - "object-assign": "^4.1.1", - "promise": "^8.1.0", - "raf": "^3.4.1", - "regenerator-runtime": "^0.13.9", - "whatwg-fetch": "^3.6.2" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/react-copy-to-clipboard": { - "version": "5.1.0", - "license": "MIT", - "dependencies": { - "copy-to-clipboard": "^3.3.1", - "prop-types": "^15.8.1" - }, - "peerDependencies": { - "react": "^15.3.0 || 16 || 17 || 18" - } - }, - "node_modules/react-dev-utils": { - "version": "12.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": 
"^7.16.0", - "address": "^1.1.2", - "browserslist": "^4.18.1", - "chalk": "^4.1.2", - "cross-spawn": "^7.0.3", - "detect-port-alt": "^1.1.6", - "escape-string-regexp": "^4.0.0", - "filesize": "^8.0.6", - "find-up": "^5.0.0", - "fork-ts-checker-webpack-plugin": "^6.5.0", - "global-modules": "^2.0.0", - "globby": "^11.0.4", - "gzip-size": "^6.0.0", - "immer": "^9.0.7", - "is-root": "^2.1.0", - "loader-utils": "^3.2.0", - "open": "^8.4.0", - "pkg-up": "^3.1.0", - "prompts": "^2.4.2", - "react-error-overlay": "^6.0.11", - "recursive-readdir": "^2.2.2", - "shell-quote": "^1.7.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/react-dev-utils/node_modules/loader-utils": { - "version": "3.2.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 12.13.0" - } - }, "node_modules/react-dom": { "version": "17.0.2", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1", @@ -14953,1314 +10177,114 @@ "react": "17.0.2" } }, - "node_modules/react-error-boundary": { - "version": "3.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.5" - }, - "engines": { - "node": ">=10", - "npm": ">=6" - }, - "peerDependencies": { - "react": ">=16.13.1" - } - }, - "node_modules/react-error-overlay": { - "version": "6.0.11", - "dev": true, - "license": "MIT" - }, - "node_modules/react-infinite-scroll-component": { - "version": "6.1.0", - "license": "MIT", - "dependencies": { - "throttle-debounce": "^2.1.0" - }, - "peerDependencies": { - "react": ">=16.0.0" - } - }, "node_modules/react-is": { "version": "17.0.2", - "license": "MIT" - }, - "node_modules/react-lifecycles-compat": { - "version": "3.0.4", - "license": "MIT" - }, - "node_modules/react-popper": { - "version": "1.3.11", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.1.2", - "@hypnosphi/create-react-context": "^0.3.1", - "deep-equal": "^1.1.1", - "popper.js": 
"^1.14.4", - "prop-types": "^15.6.1", - "typed-styles": "^0.0.7", - "warning": "^4.0.2" - }, - "peerDependencies": { - "react": "0.14.x || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, - "node_modules/react-refresh": { - "version": "0.11.0", "dev": true, "license": "MIT", - "engines": { - "node": ">=0.10.0" - } + "optional": true, + "peer": true }, - "node_modules/react-resize-detector": { - "version": "7.1.2", - "license": "MIT", + "node_modules/react-number-format": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.3.1.tgz", + "integrity": "sha512-qpYcQLauIeEhCZUZY9jXZnnroOtdy3jYaS1zQ3M1Sr6r/KMOBEIGNIb7eKT19g2N1wbYgFgvDzs19hw5TrB8XQ==", "dependencies": { - "lodash": "^4.17.21" + "prop-types": "^15.7.2" }, "peerDependencies": { - "react": "^16.0.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0" + "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0" } }, - "node_modules/react-router": { - "version": "5.3.4", - "license": "MIT", + "node_modules/react-remove-scroll": { + "version": "2.5.7", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz", + "integrity": "sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==", "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "hoist-non-react-statics": "^3.1.0", - "loose-envify": "^1.3.1", - "path-to-regexp": "^1.7.0", - "prop-types": "^15.6.2", - "react-is": "^16.6.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/react-router-dom": { - "version": "5.3.4", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "loose-envify": "^1.3.1", - "prop-types": "^15.6.2", - "react-router": "5.3.4", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, 
- "peerDependencies": { - "react": ">=15" - } - }, - "node_modules/react-router/node_modules/react-is": { - "version": "16.13.1", - "license": "MIT" - }, - "node_modules/react-scripts": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.16.0", - "@pmmmwh/react-refresh-webpack-plugin": "^0.5.3", - "@svgr/webpack": "^5.5.0", - "babel-jest": "^27.4.2", - "babel-loader": "^8.2.3", - "babel-plugin-named-asset-import": "^0.3.8", - "babel-preset-react-app": "^10.0.1", - "bfj": "^7.0.2", - "browserslist": "^4.18.1", - "camelcase": "^6.2.1", - "case-sensitive-paths-webpack-plugin": "^2.4.0", - "css-loader": "^6.5.1", - "css-minimizer-webpack-plugin": "^3.2.0", - "dotenv": "^10.0.0", - "dotenv-expand": "^5.1.0", - "eslint": "^8.3.0", - "eslint-config-react-app": "^7.0.1", - "eslint-webpack-plugin": "^3.1.1", - "file-loader": "^6.2.0", - "fs-extra": "^10.0.0", - "html-webpack-plugin": "^5.5.0", - "identity-obj-proxy": "^3.0.0", - "jest": "^27.4.3", - "jest-resolve": "^27.4.2", - "jest-watch-typeahead": "^1.0.0", - "mini-css-extract-plugin": "^2.4.5", - "postcss": "^8.4.4", - "postcss-flexbugs-fixes": "^5.0.2", - "postcss-loader": "^6.2.1", - "postcss-normalize": "^10.0.1", - "postcss-preset-env": "^7.0.1", - "prompts": "^2.4.2", - "react-app-polyfill": "^3.0.0", - "react-dev-utils": "^12.0.1", - "react-refresh": "^0.11.0", - "resolve": "^1.20.0", - "resolve-url-loader": "^4.0.0", - "sass-loader": "^12.3.0", - "semver": "^7.3.5", - "source-map-loader": "^3.0.0", - "style-loader": "^3.3.1", - "tailwindcss": "^3.0.2", - "terser-webpack-plugin": "^5.2.5", - "webpack": "^5.64.4", - "webpack-dev-server": "^4.6.0", - "webpack-manifest-plugin": "^4.0.2", - "workbox-webpack-plugin": "^6.4.1" - }, - "bin": { - "react-scripts": "bin/react-scripts.js" + "react-remove-scroll-bar": "^2.3.4", + "react-style-singleton": "^2.2.1", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.0", + "use-sidecar": "^1.1.2" }, "engines": { - "node": ">=14.0.0" 
- }, - "optionalDependencies": { - "fsevents": "^2.3.2" + "node": ">=10" }, "peerDependencies": { - "react": ">= 16", - "typescript": "^3.2.1 || ^4" + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" }, "peerDependenciesMeta": { - "typescript": { + "@types/react": { "optional": true } } }, - "node_modules/react-scripts/node_modules/@jest/console": { - "version": "27.5.1", - "dev": true, - "license": "MIT", + "node_modules/react-remove-scroll-bar": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.5.tgz", + "integrity": "sha512-3cqjOqg6s0XbOjWvmasmqHch+RLxIEk2r/70rzGXuz3iIGQsQheEQyqYCBb5EECoD01Vo2SIbDqW4paLeLTASw==", "dependencies": { - "@jest/types": "^27.5.1", - "@types/node": "*", - "chalk": "^4.0.0", - "jest-message-util": "^27.5.1", - "jest-util": "^27.5.1", - "slash": "^3.0.0" + "react-style-singleton": "^2.2.1", + "tslib": "^2.0.0" }, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/core": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/console": "^27.5.1", - "@jest/reporters": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "ansi-escapes": "^4.2.1", - "chalk": "^4.0.0", - "emittery": "^0.8.1", - "exit": "^0.1.2", - "graceful-fs": "^4.2.9", - "jest-changed-files": "^27.5.1", - "jest-config": "^27.5.1", - "jest-haste-map": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-regex-util": "^27.5.1", - "jest-resolve": "^27.5.1", - "jest-resolve-dependencies": "^27.5.1", - "jest-runner": "^27.5.1", - "jest-runtime": "^27.5.1", - "jest-snapshot": "^27.5.1", - "jest-util": "^27.5.1", - "jest-validate": "^27.5.1", - "jest-watcher": "^27.5.1", - "micromatch": "^4.0.4", - "rimraf": "^3.0.0", - "slash": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - 
"node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + "node": ">=10" }, "peerDependencies": { - "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" }, "peerDependenciesMeta": { - "node-notifier": { + "@types/react": { "optional": true } } }, - "node_modules/react-scripts/node_modules/@jest/environment": { - "version": "27.5.1", - "dev": true, - "license": "MIT", + "node_modules/react-style-singleton": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", + "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", "dependencies": { - "@jest/fake-timers": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "jest-mock": "^27.5.1" + "get-nonce": "^1.0.0", + "invariant": "^2.2.4", + "tslib": "^2.0.0" }, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/fake-timers": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@sinonjs/fake-timers": "^8.0.1", - "@types/node": "*", - "jest-message-util": "^27.5.1", - "jest-mock": "^27.5.1", - "jest-util": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/globals": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/types": "^27.5.1", - "expect": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/reporters": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@bcoe/v8-coverage": "^0.2.3", - "@jest/console": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/transform": "^27.5.1", - 
"@jest/types": "^27.5.1", - "@types/node": "*", - "chalk": "^4.0.0", - "collect-v8-coverage": "^1.0.0", - "exit": "^0.1.2", - "glob": "^7.1.2", - "graceful-fs": "^4.2.9", - "istanbul-lib-coverage": "^3.0.0", - "istanbul-lib-instrument": "^5.1.0", - "istanbul-lib-report": "^3.0.0", - "istanbul-lib-source-maps": "^4.0.0", - "istanbul-reports": "^3.1.3", - "jest-haste-map": "^27.5.1", - "jest-resolve": "^27.5.1", - "jest-util": "^27.5.1", - "jest-worker": "^27.5.1", - "slash": "^3.0.0", - "source-map": "^0.6.0", - "string-length": "^4.0.1", - "terminal-link": "^2.0.0", - "v8-to-istanbul": "^8.1.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + "node": ">=10" }, "peerDependencies": { - "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" }, "peerDependenciesMeta": { - "node-notifier": { + "@types/react": { "optional": true } } }, - "node_modules/react-scripts/node_modules/@jest/schemas": { - "version": "28.1.3", - "dev": true, - "license": "MIT", + "node_modules/react-textarea-autosize": { + "version": "8.5.3", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", + "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", "dependencies": { - "@sinclair/typebox": "^0.24.1" + "@babel/runtime": "^7.20.13", + "use-composed-ref": "^1.3.0", + "use-latest": "^1.2.1" }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/source-map": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "callsites": "^3.0.0", - "graceful-fs": "^4.2.9", - "source-map": "^0.6.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/test-result": { - "version": "27.5.1", - "dev": true, - 
"license": "MIT", - "dependencies": { - "@jest/console": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/istanbul-lib-coverage": "^2.0.0", - "collect-v8-coverage": "^1.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/test-sequencer": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/test-result": "^27.5.1", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^27.5.1", - "jest-runtime": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/transform": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.1.0", - "@jest/types": "^27.5.1", - "babel-plugin-istanbul": "^6.1.1", - "chalk": "^4.0.0", - "convert-source-map": "^1.4.0", - "fast-json-stable-stringify": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^27.5.1", - "jest-regex-util": "^27.5.1", - "jest-util": "^27.5.1", - "micromatch": "^4.0.4", - "pirates": "^4.0.4", - "slash": "^3.0.0", - "source-map": "^0.6.1", - "write-file-atomic": "^3.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/@sinonjs/fake-timers": { - "version": "8.1.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^1.7.0" - } - }, - "node_modules/react-scripts/node_modules/@types/yargs": { - "version": "16.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - 
"@types/yargs-parser": "*" - } - }, - "node_modules/react-scripts/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/react-scripts/node_modules/babel-jest": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/babel__core": "^7.1.14", - "babel-plugin-istanbul": "^6.1.1", - "babel-preset-jest": "^27.5.1", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "slash": "^3.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - }, "peerDependencies": { - "@babel/core": "^7.8.0" - } - }, - "node_modules/react-scripts/node_modules/babel-plugin-jest-hoist": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/template": "^7.3.3", - "@babel/types": "^7.3.3", - "@types/babel__core": "^7.0.0", - "@types/babel__traverse": "^7.0.6" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/babel-preset-jest": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "babel-plugin-jest-hoist": "^27.5.1", - "babel-preset-current-node-syntax": "^1.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/react-scripts/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-scripts/node_modules/cliui": { - "version": "7.0.4", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - 
"node_modules/react-scripts/node_modules/emittery": { - "version": "0.8.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/emittery?sponsor=1" - } - }, - "node_modules/react-scripts/node_modules/expect": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "jest-get-type": "^27.5.1", - "jest-matcher-utils": "^27.5.1", - "jest-message-util": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/core": "^27.5.1", - "import-local": "^3.0.2", - "jest-cli": "^27.5.1" - }, - "bin": { - "jest": "bin/jest.js" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - }, - "peerDependencies": { - "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" - }, - "peerDependenciesMeta": { - "node-notifier": { - "optional": true - } - } - }, - "node_modules/react-scripts/node_modules/jest-changed-files": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "execa": "^5.0.0", - "throat": "^6.0.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-circus": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "chalk": "^4.0.0", - "co": "^4.6.0", - "dedent": "^0.7.0", - "expect": "^27.5.1", - "is-generator-fn": "^2.0.0", - "jest-each": "^27.5.1", - "jest-matcher-utils": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-runtime": "^27.5.1", - "jest-snapshot": "^27.5.1", - "jest-util": "^27.5.1", - "pretty-format": "^27.5.1", - "slash": "^3.0.0", - "stack-utils": "^2.0.3", - 
"throat": "^6.0.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-cli": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/core": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/types": "^27.5.1", - "chalk": "^4.0.0", - "exit": "^0.1.2", - "graceful-fs": "^4.2.9", - "import-local": "^3.0.2", - "jest-config": "^27.5.1", - "jest-util": "^27.5.1", - "jest-validate": "^27.5.1", - "prompts": "^2.0.1", - "yargs": "^16.2.0" - }, - "bin": { - "jest": "bin/jest.js" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - }, - "peerDependencies": { - "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" - }, - "peerDependenciesMeta": { - "node-notifier": { - "optional": true - } - } - }, - "node_modules/react-scripts/node_modules/jest-config": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.8.0", - "@jest/test-sequencer": "^27.5.1", - "@jest/types": "^27.5.1", - "babel-jest": "^27.5.1", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "deepmerge": "^4.2.2", - "glob": "^7.1.1", - "graceful-fs": "^4.2.9", - "jest-circus": "^27.5.1", - "jest-environment-jsdom": "^27.5.1", - "jest-environment-node": "^27.5.1", - "jest-get-type": "^27.5.1", - "jest-jasmine2": "^27.5.1", - "jest-regex-util": "^27.5.1", - "jest-resolve": "^27.5.1", - "jest-runner": "^27.5.1", - "jest-util": "^27.5.1", - "jest-validate": "^27.5.1", - "micromatch": "^4.0.4", - "parse-json": "^5.2.0", - "pretty-format": "^27.5.1", - "slash": "^3.0.0", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - }, - "peerDependencies": { - "ts-node": ">=9.0.0" - }, - "peerDependenciesMeta": { - "ts-node": { - "optional": true - } - } - }, - "node_modules/react-scripts/node_modules/jest-docblock": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { 
- "detect-newline": "^3.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-each": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "chalk": "^4.0.0", - "jest-get-type": "^27.5.1", - "jest-util": "^27.5.1", - "pretty-format": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-environment-node": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/fake-timers": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "jest-mock": "^27.5.1", - "jest-util": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-leak-detector": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "jest-get-type": "^27.5.1", - "pretty-format": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-message-util": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^27.5.1", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^27.5.1", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-mock": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "@types/node": "*" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-resolve-dependencies": { - "version": 
"27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^27.5.1", - "jest-regex-util": "^27.5.1", - "jest-snapshot": "^27.5.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-runner": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/console": "^27.5.1", - "@jest/environment": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "chalk": "^4.0.0", - "emittery": "^0.8.1", - "graceful-fs": "^4.2.9", - "jest-docblock": "^27.5.1", - "jest-environment-jsdom": "^27.5.1", - "jest-environment-node": "^27.5.1", - "jest-haste-map": "^27.5.1", - "jest-leak-detector": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-resolve": "^27.5.1", - "jest-runtime": "^27.5.1", - "jest-util": "^27.5.1", - "jest-worker": "^27.5.1", - "source-map-support": "^0.5.6", - "throat": "^6.0.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-runtime": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/environment": "^27.5.1", - "@jest/fake-timers": "^27.5.1", - "@jest/globals": "^27.5.1", - "@jest/source-map": "^27.5.1", - "@jest/test-result": "^27.5.1", - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "chalk": "^4.0.0", - "cjs-module-lexer": "^1.0.0", - "collect-v8-coverage": "^1.0.0", - "execa": "^5.0.0", - "glob": "^7.1.3", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-mock": "^27.5.1", - "jest-regex-util": "^27.5.1", - "jest-resolve": "^27.5.1", - "jest-snapshot": "^27.5.1", - "jest-util": "^27.5.1", - "slash": "^3.0.0", - "strip-bom": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - 
"node_modules/react-scripts/node_modules/jest-snapshot": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.7.2", - "@babel/generator": "^7.7.2", - "@babel/plugin-syntax-typescript": "^7.7.2", - "@babel/traverse": "^7.7.2", - "@babel/types": "^7.0.0", - "@jest/transform": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/babel__traverse": "^7.0.4", - "@types/prettier": "^2.1.5", - "babel-preset-current-node-syntax": "^1.0.0", - "chalk": "^4.0.0", - "expect": "^27.5.1", - "graceful-fs": "^4.2.9", - "jest-diff": "^27.5.1", - "jest-get-type": "^27.5.1", - "jest-haste-map": "^27.5.1", - "jest-matcher-utils": "^27.5.1", - "jest-message-util": "^27.5.1", - "jest-util": "^27.5.1", - "natural-compare": "^1.4.0", - "pretty-format": "^27.5.1", - "semver": "^7.3.2" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead": { - "version": "1.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-escapes": "^4.3.1", - "chalk": "^4.0.0", - "jest-regex-util": "^28.0.0", - "jest-watcher": "^28.0.0", - "slash": "^4.0.0", - "string-length": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "jest": "^27.0.0 || ^28.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/@jest/console": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^28.1.3", - "@types/node": "*", - "chalk": "^4.0.0", - "jest-message-util": "^28.1.3", - "jest-util": "^28.1.3", - "slash": "^3.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/@jest/console/node_modules/slash": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - 
"node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/@jest/test-result": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/console": "^28.1.3", - "@jest/types": "^28.1.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "collect-v8-coverage": "^1.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/@jest/types": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/schemas": "^28.1.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/@types/yargs": { - "version": "17.0.13", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/yargs-parser": "*" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/emittery": { - "version": "0.10.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sindresorhus/emittery?sponsor=1" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-message-util": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^28.1.3", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^28.1.3", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-message-util/node_modules/slash": { - "version": "3.0.0", - "dev": true, - "license": 
"MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-regex-util": { - "version": "28.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-util": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^28.1.3", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-watcher": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/test-result": "^28.1.3", - "@jest/types": "^28.1.3", - "@types/node": "*", - "ansi-escapes": "^4.2.1", - "chalk": "^4.0.0", - "emittery": "^0.10.2", - "jest-util": "^28.1.3", - "string-length": "^4.0.1" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-watcher/node_modules/string-length": { - "version": "4.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "char-regex": "^1.0.2", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/jest-watcher/node_modules/strip-ansi": { - "version": "6.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/pretty-format": { - "version": "28.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/schemas": "^28.1.3", - "ansi-regex": "^5.0.1", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - 
"engines": { - "node": "^12.13.0 || ^14.15.0 || ^16.10.0 || >=17.0.0" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/slash": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/string-length": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "char-regex": "^2.0.0", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/string-length/node_modules/char-regex": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.20" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/strip-ansi": { - "version": "7.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/react-scripts/node_modules/jest-watch-typeahead/node_modules/strip-ansi/node_modules/ansi-regex": { - "version": "6.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/react-scripts/node_modules/jest-watcher": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/test-result": "^27.5.1", - "@jest/types": "^27.5.1", - "@types/node": "*", - "ansi-escapes": "^4.2.1", - "chalk": "^4.0.0", - "jest-util": "^27.5.1", - "string-length": "^4.0.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/react-scripts/node_modules/react-is": { - "version": "18.2.0", - 
"dev": true, - "license": "MIT" - }, - "node_modules/react-scripts/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/react-scripts/node_modules/v8-to-istanbul": { - "version": "8.1.1", - "dev": true, - "license": "ISC", - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.1", - "convert-source-map": "^1.6.0", - "source-map": "^0.7.3" - }, - "engines": { - "node": ">=10.12.0" - } - }, - "node_modules/react-scripts/node_modules/v8-to-istanbul/node_modules/source-map": { - "version": "0.7.4", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">= 8" - } - }, - "node_modules/react-scripts/node_modules/write-file-atomic": { - "version": "3.0.3", - "dev": true, - "license": "ISC", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/react-scripts/node_modules/yargs": { - "version": "16.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/react-scripts/node_modules/yargs-parser": { - "version": "20.2.9", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/react-shallow-renderer": { - "version": "16.15.0", - "license": "MIT", - "dependencies": { - "object-assign": "^4.1.1", - "react-is": "^16.12.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependencies": { - "react": "^16.0.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/react-test-renderer": { - "version": "17.0.2", - "license": "MIT", - "dependencies": { - "object-assign": "^4.1.1", - "react-is": "^17.0.2", - 
"react-shallow-renderer": "^16.13.1", - "scheduler": "^0.20.2" - }, - "peerDependencies": { - "react": "17.0.2" - } - }, - "node_modules/react-transition-group": { - "version": "3.0.0", - "license": "BSD-3-Clause", - "dependencies": { - "dom-helpers": "^3.4.0", - "loose-envify": "^1.4.0", - "prop-types": "^15.6.2", - "react-lifecycles-compat": "^3.0.4" - }, - "peerDependencies": { - "react": ">=16.6.0", - "react-dom": ">=16.6.0" - } - }, - "node_modules/reactstrap": { - "version": "8.10.1", - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.5", - "classnames": "^2.2.3", - "prop-types": "^15.5.8", - "react-popper": "^1.3.6", - "react-transition-group": "^3.0.0" - }, - "peerDependencies": { - "react": ">=16.3.0", - "react-dom": ">=16.3.0" - } - }, - "node_modules/read-cache": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "pify": "^2.3.0" - } - }, - "node_modules/readable-stream": { - "version": "3.6.0", - "dev": true, - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, "node_modules/readdirp": { "version": "3.6.0", + "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "picomatch": "^2.2.1" }, @@ -16268,17 +10292,6 @@ "node": ">=8.10.0" } }, - "node_modules/recursive-readdir": { - "version": "2.2.3", - "dev": true, - "license": "MIT", - "dependencies": { - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/regenerate": { "version": "1.4.2", "dev": true, @@ -16297,6 +10310,7 @@ }, "node_modules/regenerator-runtime": { "version": "0.13.9", + "dev": true, "license": "MIT" }, "node_modules/regenerator-transform": { @@ -16307,13 +10321,9 @@ "@babel/runtime": "^7.8.4" } }, - "node_modules/regex-parser": { - "version": "2.2.11", - "dev": true, - "license": "MIT" - }, "node_modules/regexp.prototype.flags": 
{ "version": "1.4.3", + "dev": true, "license": "MIT", "dependencies": { "call-bind": "^1.0.2", @@ -16377,109 +10387,15 @@ "jsesc": "bin/jsesc" } }, - "node_modules/relateurl": { - "version": "0.2.7", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/renderkid": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "css-select": "^4.1.3", - "dom-converter": "^0.2.0", - "htmlparser2": "^6.1.0", - "lodash": "^4.17.21", - "strip-ansi": "^6.0.1" - } - }, - "node_modules/renderkid/node_modules/css-select": { - "version": "4.3.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.0.1", - "domhandler": "^4.3.1", - "domutils": "^2.8.0", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "1.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.2.0", - "entities": "^2.0.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "4.3.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/domutils": { - "version": "2.8.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.2.0" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/renderkid/node_modules/entities": { - "version": "2.2.0", - "dev": true, - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, "node_modules/require-directory": { 
"version": "2.1.1", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } }, - "node_modules/require-from-string": { - "version": "2.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/requires-port": { - "version": "1.0.0", - "license": "MIT" - }, "node_modules/resolve": { "version": "1.22.1", "dev": true, @@ -16500,6 +10416,7 @@ "version": "3.0.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "resolve-from": "^5.0.0" }, @@ -16511,6 +10428,7 @@ "version": "5.0.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -16523,82 +10441,15 @@ "node": ">=4" } }, - "node_modules/resolve-pathname": { - "version": "3.0.0", - "license": "MIT" - }, - "node_modules/resolve-url-loader": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "adjust-sourcemap-loader": "^4.0.0", - "convert-source-map": "^1.7.0", - "loader-utils": "^2.0.0", - "postcss": "^7.0.35", - "source-map": "0.6.1" - }, - "engines": { - "node": ">=8.9" - }, - "peerDependencies": { - "rework": "1.0.1", - "rework-visit": "1.0.0" - }, - "peerDependenciesMeta": { - "rework": { - "optional": true - }, - "rework-visit": { - "optional": true - } - } - }, - "node_modules/resolve-url-loader/node_modules/picocolors": { - "version": "0.2.1", - "dev": true, - "license": "ISC" - }, - "node_modules/resolve-url-loader/node_modules/postcss": { - "version": "7.0.39", - "dev": true, - "license": "MIT", - "dependencies": { - "picocolors": "^0.2.1", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - } - }, "node_modules/resolve.exports": { "version": "1.1.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" } }, - "node_modules/ret": { - "version": "0.1.15", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12" - } - }, - 
"node_modules/retry": { - "version": "0.13.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, "node_modules/reusify": { "version": "1.0.4", "dev": true, @@ -16623,63 +10474,37 @@ } }, "node_modules/rollup": { - "version": "2.79.1", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.0.tgz", + "integrity": "sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==", "dev": true, - "license": "MIT", + "dependencies": { + "@types/estree": "1.0.5" + }, "bin": { "rollup": "dist/bin/rollup" }, "engines": { - "node": ">=10.0.0" + "node": ">=18.0.0", + "npm": ">=8.0.0" }, "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.12.0", + "@rollup/rollup-android-arm64": "4.12.0", + "@rollup/rollup-darwin-arm64": "4.12.0", + "@rollup/rollup-darwin-x64": "4.12.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.0", + "@rollup/rollup-linux-arm64-gnu": "4.12.0", + "@rollup/rollup-linux-arm64-musl": "4.12.0", + "@rollup/rollup-linux-riscv64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-musl": "4.12.0", + "@rollup/rollup-win32-arm64-msvc": "4.12.0", + "@rollup/rollup-win32-ia32-msvc": "4.12.0", + "@rollup/rollup-win32-x64-msvc": "4.12.0", "fsevents": "~2.3.2" } }, - "node_modules/rollup-plugin-terser": { - "version": "7.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/code-frame": "^7.10.4", - "jest-worker": "^26.2.1", - "serialize-javascript": "^4.0.0", - "terser": "^5.0.0" - }, - "peerDependencies": { - "rollup": "^2.0.0" - } - }, - "node_modules/rollup-plugin-terser/node_modules/jest-worker": { - "version": "26.6.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^7.0.0" - }, - "engines": { - "node": ">= 10.13.0" - } - }, - "node_modules/rollup-plugin-terser/node_modules/serialize-javascript": { - "version": "4.0.0", - "dev": true, 
- "license": "BSD-3-Clause", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/rst-selector-parser": { - "version": "2.2.3", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "lodash.flattendeep": "^4.4.0", - "nearley": "^2.7.10" - } - }, "node_modules/run-parallel": { "version": "1.2.0", "dev": true, @@ -16705,7 +10530,8 @@ "node_modules/safe-buffer": { "version": "5.1.2", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/safe-regex-test": { "version": "1.0.0", @@ -16720,48 +10546,12 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "dev": true, - "license": "MIT" - }, - "node_modules/sanitize-html": { - "version": "2.11.0", - "license": "MIT", - "dependencies": { - "deepmerge": "^4.2.2", - "escape-string-regexp": "^4.0.0", - "htmlparser2": "^8.0.0", - "is-plain-object": "^5.0.0", - "parse-srcset": "^1.0.2", - "postcss": "^8.3.11" - } - }, - "node_modules/sanitize-html/node_modules/htmlparser2": { - "version": "8.0.1", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "entities": "^4.3.0" - } - }, - "node_modules/sanitize.css": { - "version": "13.0.0", - "dev": true, - "license": "CC0-1.0" - }, "node_modules/sass": { "version": "1.69.5", + "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -16774,100 +10564,15 @@ "node": ">=14.0.0" } }, - "node_modules/sass-loader": { - "version": "12.6.0", - "dev": true, - "license": "MIT", - "dependencies": { - "klona": "^2.0.4", - "neo-async": "^2.6.2" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - 
"peerDependencies": { - "fibers": ">= 3.1.0", - "node-sass": "^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0", - "sass": "^1.3.0", - "sass-embedded": "*", - "webpack": "^5.0.0" - }, - "peerDependenciesMeta": { - "fibers": { - "optional": true - }, - "node-sass": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - } - } - }, - "node_modules/sax": { - "version": "1.2.4", - "dev": true, - "license": "ISC" - }, - "node_modules/saxes": { - "version": "5.0.1", - "dev": true, - "license": "ISC", - "dependencies": { - "xmlchars": "^2.2.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/scheduler": { "version": "0.20.2", "license": "MIT", + "peer": true, "dependencies": { "loose-envify": "^1.1.0", "object-assign": "^4.1.1" } }, - "node_modules/schema-utils": { - "version": "3.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/select-hose": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/selfsigned": { - "version": "2.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "node-forge": "^1" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/semver": { "version": "6.3.1", "dev": true, @@ -16876,144 +10581,6 @@ "semver": "bin/semver.js" } }, - "node_modules/send": { - "version": "0.18.0", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - 
"version": "2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "dev": true, - "license": "MIT" - }, - "node_modules/serialize-javascript": { - "version": "6.0.1", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-index": { - "version": "1.9.1", - "dev": true, - "license": "MIT", - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/depd": { - "version": "1.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - "dev": true, - "license": "MIT", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "dev": true, - "license": "ISC" - }, - "node_modules/serve-index/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "dev": true, - "license": "ISC" - }, - "node_modules/serve-index/node_modules/statuses": { - "version": "1.5.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-static": { - "version": "1.15.0", - "dev": true, - "license": "MIT", - 
"dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.18.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "dev": true, - "license": "ISC" - }, "node_modules/shebang-command": { "version": "2.0.0", "dev": true, @@ -17033,11 +10600,6 @@ "node": ">=8" } }, - "node_modules/shell-quote": { - "version": "1.7.3", - "dev": true, - "license": "MIT" - }, "node_modules/side-channel": { "version": "1.0.4", "dev": true, @@ -17054,37 +10616,14 @@ "node_modules/signal-exit": { "version": "3.0.7", "dev": true, - "license": "ISC" - }, - "node_modules/sinon": { - "version": "14.0.2", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "@sinonjs/commons": "^2.0.0", - "@sinonjs/fake-timers": "^9.1.2", - "@sinonjs/samsam": "^7.0.1", - "diff": "^5.0.0", - "nise": "^5.1.2", - "supports-color": "^7.2.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/sinon" - } - }, - "node_modules/sinon/node_modules/@sinonjs/commons": { - "version": "2.0.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "type-detect": "4.0.8" - } + "license": "ISC", + "peer": true }, "node_modules/sisteransi": { "version": "1.0.5", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/slash": { "version": "3.0.0", @@ -17094,107 +10633,39 @@ "node": ">=8" } }, - "node_modules/sockjs": { - "version": "0.3.24", - "dev": true, - "license": "MIT", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^8.3.2", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/source-list-map": { - "version": "2.0.1", - "dev": true, - "license": "MIT" - }, "node_modules/source-map": { "version": "0.6.1", "dev": true, "license": "BSD-3-Clause", + "peer": true, "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-js": { "version": "1.0.2", + "dev": true, "license": "BSD-3-Clause", "engines": { "node": 
">=0.10.0" } }, - "node_modules/source-map-loader": { - "version": "3.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "abab": "^2.0.5", - "iconv-lite": "^0.6.3", - "source-map-js": "^1.0.1" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, "node_modules/source-map-support": { "version": "0.5.21", "dev": true, "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" } }, - "node_modules/sourcemap-codec": { - "version": "1.4.8", - "dev": true, - "license": "MIT" - }, - "node_modules/spdy": { - "version": "4.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, "node_modules/sprintf-js": { "version": "1.0.3", "dev": true, - "license": "BSD-3-Clause" - }, - "node_modules/stable": { - "version": "0.1.8", - "dev": true, - "license": "MIT" + "license": "BSD-3-Clause", + "peer": true }, "node_modules/stack-utils": { "version": "2.0.5", @@ -17215,50 +10686,11 @@ "node": ">=8" } }, - "node_modules/stackframe": { - "version": "1.3.4", - "dev": true, - "license": "MIT" - }, - "node_modules/statuses": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/string_decoder/node_modules/safe-buffer": { - 
"version": "5.2.1", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, "node_modules/string-length": { "version": "4.0.2", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" @@ -17276,6 +10708,7 @@ "version": "4.2.3", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -17288,7 +10721,8 @@ "node_modules/string-width/node_modules/emoji-regex": { "version": "8.0.0", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/string.prototype.matchall": { "version": "4.0.7", @@ -17308,22 +10742,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/string.prototype.trim": { - "version": "1.2.6", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.19.5" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/string.prototype.trimend": { "version": "1.0.5", "dev": true, @@ -17350,19 +10768,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/stringify-object": { - "version": "3.3.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "get-own-enumerable-property-symbols": "^3.0.0", - "is-obj": "^1.0.1", - "is-regexp": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/strip-ansi": { "version": "6.0.1", "dev": true, @@ -17378,22 +10783,16 @@ "version": "4.0.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } }, - "node_modules/strip-comments": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - } - }, 
"node_modules/strip-final-newline": { "version": "2.0.0", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -17409,38 +10808,24 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/style-loader": { - "version": "3.3.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.0.0" - } - }, "node_modules/style-mod": { "version": "4.1.0", "license": "MIT" }, - "node_modules/stylehacks": { - "version": "5.1.0", + "node_modules/sugarss": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/sugarss/-/sugarss-4.0.1.tgz", + "integrity": "sha512-WCjS5NfuVJjkQzK10s8WOBY+hhDxxNt/N6ZaGwxFZ+wN3/lKKFSaaKUNecULcTTvE4urLcKaZFQD8vO0mOZujw==", "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.16.6", - "postcss-selector-parser": "^6.0.4" - }, "engines": { - "node": "^10 || ^12 || >=14.0" + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" }, "peerDependencies": { - "postcss": "^8.2.15" + "postcss": "^8.3.3" } }, "node_modules/supports-color": { @@ -17454,18 +10839,6 @@ "node": ">=8" } }, - "node_modules/supports-hyperlinks": { - "version": "2.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0", - "supports-color": "^7.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", "dev": true, @@ -17477,322 +10850,17 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/svg-parser": { - "version": "2.0.4", - "dev": true, - "license": "MIT" - }, - "node_modules/svgo": { - "version": "1.3.2", - "dev": true, - "license": "MIT", - "dependencies": { - "chalk": "^2.4.1", - "coa": "^2.0.2", - "css-select": "^2.0.0", - "css-select-base-adapter": "^0.1.1", - "css-tree": "1.0.0-alpha.37", - 
"csso": "^4.0.2", - "js-yaml": "^3.13.1", - "mkdirp": "~0.5.1", - "object.values": "^1.1.0", - "sax": "~1.2.4", - "stable": "^0.1.8", - "unquote": "~1.1.1", - "util.promisify": "~1.0.0" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/svgo/node_modules/ansi-styles": { - "version": "3.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/svgo/node_modules/argparse": { - "version": "1.0.10", - "dev": true, - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/svgo/node_modules/chalk": { - "version": "2.4.2", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/svgo/node_modules/color-convert": { - "version": "1.9.3", - "dev": true, - "license": "MIT", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/svgo/node_modules/color-name": { - "version": "1.1.3", - "dev": true, - "license": "MIT" - }, - "node_modules/svgo/node_modules/css-select": { - "version": "2.1.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/svgo/node_modules/css-what": { - "version": "3.4.2", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/svgo/node_modules/dom-serializer": { - "version": "0.2.2", - "dev": true, - "license": "MIT", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/svgo/node_modules/domutils": { - "version": "1.7.0", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - 
}, - "node_modules/svgo/node_modules/domutils/node_modules/domelementtype": { - "version": "1.3.1", - "dev": true, - "license": "BSD-2-Clause" - }, - "node_modules/svgo/node_modules/entities": { - "version": "2.2.0", - "dev": true, - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/svgo/node_modules/escape-string-regexp": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/svgo/node_modules/has-flag": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/svgo/node_modules/js-yaml": { - "version": "3.14.1", - "dev": true, - "license": "MIT", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/svgo/node_modules/nth-check": { - "version": "1.0.2", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/svgo/node_modules/supports-color": { - "version": "5.5.0", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/symbol-tree": { - "version": "3.2.4", - "dev": true, - "license": "MIT" - }, - "node_modules/tailwindcss": { - "version": "3.1.8", - "dev": true, - "license": "MIT", - "dependencies": { - "arg": "^5.0.2", - "chokidar": "^3.5.3", - "color-name": "^1.1.4", - "detective": "^5.2.1", - "didyoumean": "^1.2.2", - "dlv": "^1.1.3", - "fast-glob": "^3.2.11", - "glob-parent": "^6.0.2", - "is-glob": "^4.0.3", - "lilconfig": "^2.0.6", - "normalize-path": "^3.0.0", - "object-hash": "^3.0.0", - "picocolors": "^1.0.0", - "postcss": "^8.4.14", - "postcss-import": "^14.1.0", - "postcss-js": "^4.0.0", - "postcss-load-config": "^3.1.4", - "postcss-nested": "5.0.6", - "postcss-selector-parser": "^6.0.10", - "postcss-value-parser": "^4.2.0", - "quick-lru": 
"^5.1.1", - "resolve": "^1.22.1" - }, - "bin": { - "tailwind": "lib/cli.js", - "tailwindcss": "lib/cli.js" - }, - "engines": { - "node": ">=12.13.0" - }, - "peerDependencies": { - "postcss": "^8.0.9" - } - }, - "node_modules/tapable": { - "version": "2.2.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/temp-dir": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/tempusdominus-bootstrap-4": { - "version": "5.39.2", - "license": "MIT", - "dependencies": { - "bootstrap": "^4.6.1", - "jquery": "^3.6.0", - "moment": "^2.29.2", - "moment-timezone": "^0.5.34", - "popper.js": "^1.16.1" - }, - "peerDependencies": { - "bootstrap": ">=4.5.2", - "jquery": "^3.5.1", - "moment": "^2.29.0", - "moment-timezone": "^0.5.31", - "popper.js": "^1.16.1", - "tempusdominus-core": "5.19.3" - } - }, - "node_modules/tempusdominus-core": { - "version": "5.19.3", - "license": "MIT", - "dependencies": { - "jquery": "^3.6.0", - "moment": "~2.29.2", - "moment-timezone": "^0.5.34" - }, - "peerDependencies": { - "jquery": "^3.0", - "moment": "^2.29.2", - "moment-timezone": "^0.5.0" - } - }, - "node_modules/tempy": { - "version": "0.6.0", - "dev": true, - "license": "MIT", - "dependencies": { - "is-stream": "^2.0.0", - "temp-dir": "^2.0.0", - "type-fest": "^0.16.0", - "unique-string": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/tempy/node_modules/type-fest": { - "version": "0.16.0", - "dev": true, - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/terminal-link": { - "version": "2.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-escapes": "^4.2.1", - "supports-hyperlinks": "^2.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } + "node_modules/tabbable": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", + "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" }, "node_modules/terser": { "version": "5.20.0", "dev": true, "license": "BSD-2-Clause", + "optional": true, + "peer": true, "dependencies": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.8.2", @@ -17806,48 +10874,18 @@ "node": ">=10" } }, - "node_modules/terser-webpack-plugin": { - "version": "5.3.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.17", - "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.1", - "terser": "^5.16.8" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^5.1.0" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "esbuild": { - "optional": true - }, - "uglify-js": { - "optional": true - } - } - }, "node_modules/terser/node_modules/commander": { "version": "2.20.3", "dev": true, - "license": "MIT" + "license": "MIT", + "optional": true, + "peer": true }, "node_modules/test-exclude": { "version": "6.0.0", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "@istanbuljs/schema": "^0.1.2", "glob": "^7.1.4", @@ -17862,35 +10900,11 @@ "dev": true, "license": "MIT" }, - "node_modules/throat": { - "version": "6.0.1", - "dev": true, - "license": "MIT" - }, - "node_modules/throttle-debounce": { - "version": "2.3.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/thunky": { - "version": "1.1.0", - "dev": true, - "license": "MIT" - }, - "node_modules/tiny-invariant": { - "version": "1.3.1", - "license": "MIT" - }, - "node_modules/tiny-warning": { - "version": "1.0.3", - "license": "MIT" - }, 
"node_modules/tmpl": { "version": "1.0.5", "dev": true, - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "peer": true }, "node_modules/to-fast-properties": { "version": "2.0.0", @@ -17902,6 +10916,7 @@ }, "node_modules/to-regex-range": { "version": "5.0.1", + "dev": true, "license": "MIT", "dependencies": { "is-number": "^7.0.0" @@ -17910,56 +10925,18 @@ "node": ">=8.0" } }, - "node_modules/toggle-selection": { - "version": "1.0.6", - "license": "MIT" - }, - "node_modules/toidentifier": { - "version": "1.0.1", + "node_modules/ts-api-utils": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.2.1.tgz", + "integrity": "sha512-RIYA36cJn2WiH9Hy77hdF9r7oEwxAtB/TS9/S4Qd90Ap4z5FSiin5zEiTL44OII1Y3IIlEvxwxFUVgrHSZ/UpA==", "dev": true, - "license": "MIT", "engines": { - "node": ">=0.6" - } - }, - "node_modules/tough-cookie": { - "version": "4.1.3", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" + "node": ">=16" }, - "engines": { - "node": ">=6" + "peerDependencies": { + "typescript": ">=4.2.0" } }, - "node_modules/tough-cookie/node_modules/universalify": { - "version": "0.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/tr46": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/tryer": { - "version": "1.0.1", - "dev": true, - "license": "MIT" - }, "node_modules/ts-jest": { "version": "29.1.1", "dev": true, @@ -18087,8 +11064,9 @@ }, "node_modules/type-check": { "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "dev": true, - "license": "MIT", "dependencies": { "prelude-ls": "^1.2.1" }, @@ 
-18100,6 +11078,7 @@ "version": "4.0.8", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=4" } @@ -18115,40 +11094,17 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/type-is": { - "version": "1.6.18", - "dev": true, - "license": "MIT", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typed-styles": { - "version": "0.0.7", - "license": "MIT" - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, "node_modules/typescript": { - "version": "4.9.5", + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", + "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", "dev": true, - "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" }, "engines": { - "node": ">=4.2.0" + "node": ">=14.17" } }, "node_modules/unbox-primitive": { @@ -18167,6 +11123,7 @@ }, "node_modules/undici-types": { "version": "5.26.5", + "dev": true, "license": "MIT" }, "node_modules/unicode-canonical-property-names-ecmascript": { @@ -18205,49 +11162,10 @@ "node": ">=4" } }, - "node_modules/unique-string": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/universalify": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/unquote": { - "version": "1.1.1", - "dev": true, - "license": "MIT" - }, - "node_modules/upath": { - "version": "1.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4", - "yarn": "*" - } - }, 
"node_modules/update-browserslist-db": { - "version": "1.0.9", + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", "dev": true, "funding": [ { @@ -18257,15 +11175,18 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { "escalade": "^3.1.1", "picocolors": "^1.0.0" }, "bin": { - "browserslist-lint": "cli.js" + "update-browserslist-db": "cli.js" }, "peerDependencies": { "browserslist": ">= 4.21.0" @@ -18279,13 +11200,82 @@ "punycode": "^2.1.0" } }, - "node_modules/url-parse": { - "version": "1.5.10", - "dev": true, - "license": "MIT", + "node_modules/use-callback-ref": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.1.tgz", + "integrity": "sha512-Lg4Vx1XZQauB42Hw3kK7JM6yjVjgFmFC5/Ab797s79aARomD2nEErc4mCgM8EZrARLmmbWpi5DGCadmK50DcAQ==", "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-composed-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/use-isomorphic-layout-effect": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-latest": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", + "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "dependencies": { + "use-isomorphic-layout-effect": "^1.1.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", + "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } } }, "node_modules/util-deprecate": { @@ -18293,41 +11283,6 @@ "dev": true, "license": "MIT" }, - "node_modules/util.promisify": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.2", - "has-symbols": "^1.0.1", - "object.getownpropertydescriptors": "^2.1.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/utila": { - "version": "0.4.0", - "dev": true, - "license": "MIT" - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "engines": { - 
"node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "8.3.2", - "dev": true, - "license": "MIT", - "bin": { - "uuid": "dist/bin/uuid" - } - }, "node_modules/v8-to-istanbul": { "version": "9.0.1", "dev": true, @@ -18342,442 +11297,79 @@ "node": ">=10.12.0" } }, - "node_modules/value-equal": { - "version": "1.0.1", - "license": "MIT" - }, - "node_modules/vary": { - "version": "1.1.2", + "node_modules/vite": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.3.tgz", + "integrity": "sha512-UfmUD36DKkqhi/F75RrxvPpry+9+tTkrXfMNZD+SboZqBCMsxKtO52XeGzzuh7ioz+Eo/SYDBbdb0Z7vgcDJew==", "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/w3c-hr-time": { - "version": "1.0.2", - "dev": true, - "license": "MIT", "dependencies": { - "browser-process-hrtime": "^1.0.0" + "esbuild": "^0.19.3", + "postcss": "^8.4.35", + "rollup": "^4.2.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } } }, "node_modules/w3c-keyname": { "version": "2.2.6", "license": "MIT" }, - "node_modules/w3c-xmlserializer": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "xml-name-validator": "^3.0.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/walker": { "version": "1.0.8", "dev": true, "license": "Apache-2.0", + "peer": true, 
"dependencies": { "makeerror": "1.0.12" } }, - "node_modules/warning": { - "version": "4.0.3", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, - "node_modules/watchpack": { - "version": "2.4.0", - "dev": true, - "license": "MIT", - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/wbuf": { - "version": "1.7.3", - "dev": true, - "license": "MIT", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/webidl-conversions": { - "version": "6.1.0", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=10.4" - } - }, - "node_modules/webpack": { - "version": "5.88.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^1.0.0", - "@webassemblyjs/ast": "^1.11.5", - "@webassemblyjs/wasm-edit": "^1.11.5", - "@webassemblyjs/wasm-parser": "^1.11.5", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.9.0", - "browserslist": "^4.14.5", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.15.0", - "es-module-lexer": "^1.2.1", - "eslint-scope": "5.1.1", - "events": "^3.2.0", - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", - "json-parse-even-better-errors": "^2.3.1", - "loader-runner": "^4.2.0", - "mime-types": "^2.1.27", - "neo-async": "^2.6.2", - "schema-utils": "^3.2.0", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.3.7", - "watchpack": "^2.4.0", - "webpack-sources": "^3.2.3" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "colorette": "^2.0.10", - "memfs": "^3.4.3", - "mime-types": "^2.1.31", - 
"range-parser": "^1.2.1", - "schema-utils": "^4.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" - } - }, - "node_modules/webpack-dev-middleware/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/webpack-dev-middleware/node_modules/schema-utils": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpack-dev-server": { - "version": "4.11.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/bonjour": "^3.5.9", - "@types/connect-history-api-fallback": "^1.3.5", - "@types/express": "^4.17.13", - "@types/serve-index": "^1.9.1", - "@types/serve-static": "^1.13.10", - "@types/sockjs": "^0.3.33", - "@types/ws": "^8.5.1", - "ansi-html-community": "^0.0.8", - "bonjour-service": "^1.0.11", - "chokidar": "^3.5.3", - "colorette": "^2.0.10", - "compression": "^1.7.4", - "connect-history-api-fallback": "^2.0.0", - "default-gateway": "^6.0.3", - "express": "^4.17.3", - 
"graceful-fs": "^4.2.6", - "html-entities": "^2.3.2", - "http-proxy-middleware": "^2.0.3", - "ipaddr.js": "^2.0.1", - "open": "^8.0.9", - "p-retry": "^4.5.0", - "rimraf": "^3.0.2", - "schema-utils": "^4.0.0", - "selfsigned": "^2.1.1", - "serve-index": "^1.9.1", - "sockjs": "^0.3.24", - "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.1", - "ws": "^8.4.2" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.37.0 || ^5.0.0" - }, - "peerDependenciesMeta": { - "webpack-cli": { - "optional": true - } - } - }, - "node_modules/webpack-dev-server/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack-dev-server/node_modules/ajv-keywords": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3" - }, - "peerDependencies": { - "ajv": "^8.8.2" - } - }, - "node_modules/webpack-dev-server/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/webpack-dev-server/node_modules/schema-utils": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json-schema": "^7.0.9", - "ajv": "^8.8.0", - "ajv-formats": "^2.1.1", - "ajv-keywords": "^5.0.0" - }, - "engines": { - "node": ">= 12.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, - "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.9.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": 
{ - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/webpack-manifest-plugin": { - "version": "4.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "tapable": "^2.0.0", - "webpack-sources": "^2.2.0" - }, - "engines": { - "node": ">=12.22.0" - }, - "peerDependencies": { - "webpack": "^4.44.2 || ^5.47.0" - } - }, - "node_modules/webpack-manifest-plugin/node_modules/webpack-sources": { - "version": "2.3.1", - "dev": true, - "license": "MIT", - "dependencies": { - "source-list-map": "^2.0.1", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/webpack-sources": { - "version": "3.2.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/webpack/node_modules/eslint-scope": { - "version": "5.1.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/webpack/node_modules/estraverse": { - "version": "4.3.0", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/whatwg-encoding": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - "dependencies": { - "iconv-lite": "0.4.24" - } - }, - "node_modules/whatwg-encoding/node_modules/iconv-lite": { - "version": "0.4.24", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 
3" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/whatwg-fetch": { "version": "3.6.2", "dev": true, "license": "MIT" }, - "node_modules/whatwg-mimetype": { - "version": "2.3.0", - "dev": true, - "license": "MIT" - }, - "node_modules/whatwg-url": { - "version": "8.7.0", - "dev": true, - "license": "MIT", - "dependencies": { - "lodash": "^4.7.0", - "tr46": "^2.1.0", - "webidl-conversions": "^6.1.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/which": { "version": "2.0.2", "dev": true, @@ -18807,304 +11399,11 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/word-wrap": { - "version": "1.2.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/workbox-background-sync": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "idb": "^7.0.1", - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-broadcast-update": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-build": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@apideck/better-ajv-errors": "^0.3.1", - "@babel/core": "^7.11.1", - "@babel/preset-env": "^7.11.0", - "@babel/runtime": "^7.11.2", - "@rollup/plugin-babel": "^5.2.0", - "@rollup/plugin-node-resolve": "^11.2.1", - "@rollup/plugin-replace": "^2.4.1", - "@surma/rollup-plugin-off-main-thread": "^2.2.3", - "ajv": "^8.6.0", - "common-tags": "^1.8.0", - "fast-json-stable-stringify": "^2.1.0", - "fs-extra": "^9.0.1", - "glob": "^7.1.6", - "lodash": "^4.17.20", - "pretty-bytes": "^5.3.0", - "rollup": "^2.43.1", - "rollup-plugin-terser": "^7.0.0", - "source-map": "^0.8.0-beta.0", - "stringify-object": "^3.3.0", - "strip-comments": "^2.0.1", - "tempy": "^0.6.0", - "upath": "^1.2.0", - "workbox-background-sync": "6.5.4", - "workbox-broadcast-update": "6.5.4", - "workbox-cacheable-response": "6.5.4", - "workbox-core": 
"6.5.4", - "workbox-expiration": "6.5.4", - "workbox-google-analytics": "6.5.4", - "workbox-navigation-preload": "6.5.4", - "workbox-precaching": "6.5.4", - "workbox-range-requests": "6.5.4", - "workbox-recipes": "6.5.4", - "workbox-routing": "6.5.4", - "workbox-strategies": "6.5.4", - "workbox-streams": "6.5.4", - "workbox-sw": "6.5.4", - "workbox-window": "6.5.4" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/workbox-build/node_modules/@apideck/better-ajv-errors": { - "version": "0.3.6", - "dev": true, - "license": "MIT", - "dependencies": { - "json-schema": "^0.4.0", - "jsonpointer": "^5.0.0", - "leven": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "ajv": ">=8" - } - }, - "node_modules/workbox-build/node_modules/ajv": { - "version": "8.11.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/workbox-build/node_modules/fs-extra": { - "version": "9.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/workbox-build/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/workbox-build/node_modules/source-map": { - "version": "0.8.0-beta.0", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "whatwg-url": "^7.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/workbox-build/node_modules/tr46": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/workbox-build/node_modules/webidl-conversions": { - "version": "4.0.2", - "dev": true, - "license": 
"BSD-2-Clause" - }, - "node_modules/workbox-build/node_modules/whatwg-url": { - "version": "7.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "lodash.sortby": "^4.7.0", - "tr46": "^1.0.1", - "webidl-conversions": "^4.0.2" - } - }, - "node_modules/workbox-cacheable-response": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-core": { - "version": "6.5.4", - "dev": true, - "license": "MIT" - }, - "node_modules/workbox-expiration": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "idb": "^7.0.1", - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-google-analytics": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-background-sync": "6.5.4", - "workbox-core": "6.5.4", - "workbox-routing": "6.5.4", - "workbox-strategies": "6.5.4" - } - }, - "node_modules/workbox-navigation-preload": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-precaching": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4", - "workbox-routing": "6.5.4", - "workbox-strategies": "6.5.4" - } - }, - "node_modules/workbox-range-requests": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-recipes": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-cacheable-response": "6.5.4", - "workbox-core": "6.5.4", - "workbox-expiration": "6.5.4", - "workbox-precaching": "6.5.4", - "workbox-routing": "6.5.4", - "workbox-strategies": "6.5.4" - } - }, - "node_modules/workbox-routing": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-strategies": { - "version": "6.5.4", - "dev": true, - 
"license": "MIT", - "dependencies": { - "workbox-core": "6.5.4" - } - }, - "node_modules/workbox-streams": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "workbox-core": "6.5.4", - "workbox-routing": "6.5.4" - } - }, - "node_modules/workbox-sw": { - "version": "6.5.4", - "dev": true, - "license": "MIT" - }, - "node_modules/workbox-webpack-plugin": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-json-stable-stringify": "^2.1.0", - "pretty-bytes": "^5.4.1", - "upath": "^1.2.0", - "webpack-sources": "^1.4.3", - "workbox-build": "6.5.4" - }, - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "webpack": "^4.4.0 || ^5.9.0" - } - }, - "node_modules/workbox-webpack-plugin/node_modules/webpack-sources": { - "version": "1.4.3", - "dev": true, - "license": "MIT", - "dependencies": { - "source-list-map": "^2.0.0", - "source-map": "~0.6.1" - } - }, - "node_modules/workbox-window": { - "version": "6.5.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/trusted-types": "^2.0.2", - "workbox-core": "6.5.4" - } - }, "node_modules/wrap-ansi": { "version": "7.0.0", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -19135,48 +11434,11 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/ws": { - "version": "7.5.9", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.3.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/xml-name-validator": { - "version": "3.0.0", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/xmlchars": { - "version": "2.2.0", - "dev": true, - "license": "MIT" - }, - "node_modules/xtend": { - "version": "4.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.4" 
- } - }, "node_modules/y18n": { "version": "5.0.8", "dev": true, "license": "ISC", + "peer": true, "engines": { "node": ">=10" } @@ -19234,6 +11496,7 @@ "react-app": { "name": "@prometheus-io/app", "version": "0.49.1", + "extraneous": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", "@codemirror/commands": "^6.3.2", diff --git a/web/ui/package.json b/web/ui/package.json index 026c1663b3..10307bc807 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -4,18 +4,19 @@ "scripts": { "build": "GENERATE_SOURCEMAP=false bash build_ui.sh --all", "build:module": "bash build_ui.sh --build-module", - "start": "npm run start -w react-app", + "start": "npm run start -w mantine-ui", "test": "npm run test --workspaces", "lint": "npm run lint --workspaces" }, "workspaces": [ - "react-app", + "mantine-ui", "module/*" ], "engines": { "npm": ">=7.0.0" }, "devDependencies": { + "@rollup/plugin-node-resolve": "^15.2.3", "@types/jest": "^29.5.11", "@types/node": "^20.10.4", "eslint-config-prettier": "^8.10.0", @@ -24,9 +25,9 @@ "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", - "react-scripts": "^5.0.1", + "rollup": "^4.12.0", "ts-jest": "^29.1.1", - "typescript": "^4.9.5" + "typescript": "^5.3.3" }, "version": "0.49.1" } diff --git a/web/web.go b/web/web.go index 43b79c235d..d623505573 100644 --- a/web/web.go +++ b/web/web.go @@ -436,8 +436,8 @@ func New(logger log.Logger, o *Options) *Handler { } // Static files required by the React app. 
- router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) { - r.URL.Path = path.Join("/static/react/static", route.Param(r.Context(), "filepath")) + router.Get("/assets/*filepath", func(w http.ResponseWriter, r *http.Request) { + r.URL.Path = path.Join("/static/react/assets", route.Param(r.Context(), "filepath")) fs := server.StaticFileServer(ui.Assets) fs.ServeHTTP(w, r) }) From 096acdce7e18a084805c0e4945aafc4ea485ff94 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 21 Feb 2024 22:36:27 +0100 Subject: [PATCH 0010/1397] Fix npm workspaces build for Mantine UI Thanks to input by @nexucis :) Signed-off-by: Julius Volz --- web/ui/mantine-ui/package-lock.json | 4641 ++++------ web/ui/mantine-ui/package.json | 9 +- .../src/client/prometheus.ts | 2 +- web/ui/module/lezer-promql/package.json | 3 +- web/ui/package-lock.json | 7784 ++++------------- web/ui/package.json | 30 +- web/ui/react-app/postcss.config.cjs | 14 + .../src/images/prometheus_logo_grey.svg.orig | 19 + .../src/images/prometheus_logo_grey.svg.rej | 11 + .../src/pages/graph/GraphHistogramHelpers.ts | 62 + 10 files changed, 3491 insertions(+), 9084 deletions(-) create mode 100644 web/ui/react-app/postcss.config.cjs create mode 100644 web/ui/react-app/src/images/prometheus_logo_grey.svg.orig create mode 100644 web/ui/react-app/src/images/prometheus_logo_grey.svg.rej create mode 100644 web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts diff --git a/web/ui/mantine-ui/package-lock.json b/web/ui/mantine-ui/package-lock.json index 47b72006f5..ecb36f0a60 100644 --- a/web/ui/mantine-ui/package-lock.json +++ b/web/ui/mantine-ui/package-lock.json @@ -1,15 +1,13 @@ { - "name": "mantine-ui", + "name": "app", "version": "0.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "mantine-ui", + "name": "app", "version": "0.0.0", "dependencies": { - "@mantine/core": "^7.5.3", - "@mantine/hooks": "^7.5.3", "react": "^18.2.0", "react-dom": "^18.2.0" }, @@ -18,684 +16,404 
@@ "@types/react-dom": "^18.2.19", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", - "@vitejs/plugin-react": "^4.2.1", + "@vitejs/plugin-react-swc": "^3.5.0", "eslint": "^8.56.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", - "postcss": "^8.4.35", - "postcss-preset-mantine": "^1.13.0", - "postcss-simple-vars": "^7.0.1", "typescript": "^5.2.2", "vite": "^5.1.0" } }, - "node_modules/@babel/core": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.9.tgz", - "integrity": "sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==", - "dev": true, - "dependencies": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.23.5", - "@babel/generator": "^7.23.6", - "@babel/helper-compilation-targets": "^7.23.6", - "@babel/helper-module-transforms": "^7.23.3", - "@babel/helpers": "^7.23.9", - "@babel/parser": "^7.23.9", - "@babel/template": "^7.23.9", - "@babel/traverse": "^7.23.9", - "@babel/types": "^7.23.9", - "convert-source-map": "^2.0.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.3", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/babel" - } - }, - "node_modules/@babel/core/node_modules/@ampproject/remapping": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", - "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", - "dev": true, - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/code-frame": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", - "integrity": 
"sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", - "dev": true, - "dependencies": { - "@babel/highlight": "^7.23.4", - "chalk": "^2.4.2" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/compat-data": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", - "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", "dev": true, "engines": { - "node": ">=6.9.0" + "node": ">=0.10.0" } }, - "node_modules/@babel/core/node_modules/@babel/generator": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz", - "integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.23.6", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", - "jsesc": "^2.5.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-compilation-targets": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", - "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", - "dev": true, - "dependencies": { - "@babel/compat-data": "^7.23.5", - "@babel/helper-validator-option": "^7.23.5", - "browserslist": "^4.22.2", - "lru-cache": "^5.1.1", - "semver": "^6.3.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - 
"node_modules/@babel/core/node_modules/@babel/helper-environment-visitor": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", - "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-function-name": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", - "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", - "dev": true, - "dependencies": { - "@babel/template": "^7.22.15", - "@babel/types": "^7.23.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-module-imports": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", - "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.15" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-module-transforms": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", - "integrity": 
"sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", - "dev": true, - "dependencies": { - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-module-imports": "^7.22.15", - "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/helper-validator-identifier": "^7.22.20" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-simple-access": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", - "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-string-parser": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", - "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": 
"sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helper-validator-option": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", - "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/helpers": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", - "integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==", - "dev": true, - "dependencies": { - "@babel/template": "^7.23.9", - "@babel/traverse": "^7.23.9", - "@babel/types": "^7.23.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/highlight": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", - "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", - "dev": true, - "dependencies": { - "@babel/helper-validator-identifier": "^7.22.20", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/parser": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", - "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", - "dev": true, - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/template": { - "version": "7.23.9", - "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", - "integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.23.5", - "@babel/parser": "^7.23.9", - "@babel/types": "^7.23.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/traverse": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz", - "integrity": "sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.23.5", - "@babel/generator": "^7.23.6", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.9", - "@babel/types": "^7.23.9", - "debug": "^4.3.1", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@babel/types": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", - "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", - "dev": true, - "dependencies": { - "@babel/helper-string-parser": "^7.23.4", - "@babel/helper-validator-identifier": "^7.22.20", - "to-fast-properties": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", - "dev": true, - "dependencies": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - 
"@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/core/node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "dev": true, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/core/node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", - "dev": true, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/core/node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", - "dev": true - }, - "node_modules/@babel/core/node_modules/@jridgewell/trace-mapping": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", - "integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", - "dev": true, - "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" - } - }, - "node_modules/@babel/core/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/browserslist": { - 
"version": "4.23.0", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } + "node_modules/@esbuild/aix-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", + "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", + "cpu": [ + "ppc64" ], - "dependencies": { - "caniuse-lite": "^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/@babel/core/node_modules/caniuse-lite": { - "version": "1.0.30001587", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001587.tgz", - "integrity": "sha512-HMFNotUmLXn71BQxg8cijvqxnIAofforZOwGsxyXJ0qugTdspUF4sPSJ2vhgprHCB996tIDzEq1ubumPDV8ULA==", "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/caniuse-lite" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ] - }, - "node_modules/@babel/core/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - 
"escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/core/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "dev": true - }, - "node_modules/@babel/core/node_modules/convert-source-map": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true - }, - "node_modules/@babel/core/node_modules/electron-to-chromium": { - "version": "1.4.673", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.673.tgz", - "integrity": "sha512-zjqzx4N7xGdl5468G+vcgzDhaHkaYgVcf9MqgexcTqsl2UHSCmOj/Bi3HAprg4BZCpC7HyD8a6nZl6QAZf72gw==", - "dev": true - }, - "node_modules/@babel/core/node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/@babel/core/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", 
- "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/core/node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "node_modules/@babel/core/node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "dev": true, - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "dev": true, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - 
"node": ">=6" - } - }, - "node_modules/@babel/core/node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dev": true, - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/@babel/core/node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", - "dev": true - }, - "node_modules/@babel/core/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/@babel/core/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/core/node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": 
"sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/browserslist" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/browserslist" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } + "optional": true, + "os": [ + "aix" ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", + "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", + "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", + "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + 
"os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", + "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", + "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", + "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", + "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", + "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" 
+ ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", + "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", + "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", + "cpu": [ + "loong64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", + "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", + "cpu": [ + "mips64el" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", + "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", + "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ 
+ "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", + "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", + "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", + "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", + "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", + "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "sunos" + ], + "engines": { + 
"node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", + "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", + "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", + "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "eslint-visitor-keys": "^3.3.0" }, - "bin": { - "update-browserslist-db": "cli.js" + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "peerDependencies": { - "browserslist": ">= 4.21.0" - } - }, - "node_modules/@babel/core/node_modules/yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": 
"sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "dev": true - }, - "node_modules/@babel/plugin-transform-react-jsx-self": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.23.3.tgz", - "integrity": "sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-self/node_modules/@babel/helper-plugin-utils": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", - "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-source": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.23.3.tgz", - "integrity": "sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-source/node_modules/@babel/helper-plugin-utils": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", - "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/runtime": { - "version": "7.23.9", - 
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.9.tgz", - "integrity": "sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, - "engines": { - "node": ">=6.9.0" + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "node_modules/@eslint-community/regexpp": { @@ -707,80 +425,115 @@ "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, - "node_modules/@floating-ui/core": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.0.tgz", - "integrity": "sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g==", + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, "dependencies": { - "@floating-ui/utils": "^0.2.1" - } - }, - "node_modules/@floating-ui/dom": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.3.tgz", - "integrity": "sha512-RnDthu3mzPlQ31Ss/BTwQ1zjzIhr3lk1gZB1OC56h/1vEtaXkESrOqL5fQVMfXpwGtRwX+YsZBdyHtJMQnkArw==", - "dependencies": { - "@floating-ui/core": "^1.0.0", - "@floating-ui/utils": "^0.2.0" - } - }, - "node_modules/@floating-ui/react": { - "version": "0.24.8", - "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.24.8.tgz", - "integrity": "sha512-AuYeDoaR8jtUlUXtZ1IJ/6jtBkGnSpJXbGNzokBL87VDJ8opMq1Bgrc0szhK482ReQY6KZsMoZCVSb4xwalkBA==", - "dependencies": { - "@floating-ui/react-dom": "^2.0.1", - "aria-hidden": "^1.2.3", - "tabbable": "^6.0.1" + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" }, - "peerDependencies": { - "react": 
">=16.8.0", - "react-dom": ">=16.8.0" - } - }, - "node_modules/@floating-ui/react-dom": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.8.tgz", - "integrity": "sha512-HOdqOt3R3OGeTKidaLvJKcgg75S6tibQ3Tif4eyd91QnIJWr0NLvoXFpJA/j8HqkFSL68GDca9AuyWEHlhyClw==", - "dependencies": { - "@floating-ui/dom": "^1.6.1" + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, - "peerDependencies": { - "react": ">=16.8.0", - "react-dom": ">=16.8.0" + "funding": { + "url": "https://opencollective.com/eslint" } }, - "node_modules/@floating-ui/utils": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.1.tgz", - "integrity": "sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==" - }, - "node_modules/@mantine/core": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.5.3.tgz", - "integrity": "sha512-Wvv6DJXI+GX9mmKG5HITTh/24sCZ0RoYQHdTHh0tOfGnEy+RleyhA82UjnMsp0n2NjfCISBwbiKgfya6b2iaFw==", + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, "dependencies": { - "@floating-ui/react": "^0.24.8", - "clsx": "2.0.0", - "react-number-format": "^5.3.1", - "react-remove-scroll": "^2.5.7", - "react-textarea-autosize": "8.5.3", - "type-fest": "^3.13.1" - }, - "peerDependencies": { - "@mantine/hooks": "7.5.3", - "react": "^18.2.0", - "react-dom": "^18.2.0" + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" } }, - "node_modules/@mantine/hooks": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.5.3.tgz", - "integrity": "sha512-mFI448mAs12v8FrgSVhytqlhTVrEjIfd/PqPEfwJu5YcZIq4YZdqpzJIUbANnRrFSvmoQpDb1PssdKx7Ds35hw==", - 
"peerDependencies": { - "react": "^18.2.0" + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" } }, + "node_modules/@eslint/js": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", + "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + 
"node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", + "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", + "dev": true + }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -816,89 +569,408 @@ "node": ">= 8" } }, - "node_modules/@types/babel__core": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", - "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz", + "integrity": "sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==", + "cpu": [ + "arm" + ], "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz", + "integrity": "sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + 
"node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz", + "integrity": "sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz", + "integrity": "sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz", + "integrity": "sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz", + "integrity": "sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz", + "integrity": "sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": 
"4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz", + "integrity": "sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz", + "integrity": "sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz", + "integrity": "sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz", + "integrity": "sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz", + "integrity": "sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.12.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz", + "integrity": "sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@swc/core": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.4.2.tgz", + "integrity": "sha512-vWgY07R/eqj1/a0vsRKLI9o9klGZfpLNOVEnrv4nrccxBgYPjcf22IWwAoaBJ+wpA7Q4fVjCUM8lP0m01dpxcg==", + "dev": true, + "hasInstallScript": true, "dependencies": { - "@babel/parser": "^7.20.7", - "@babel/types": "^7.20.7", - "@types/babel__generator": "*", - "@types/babel__template": "*", - "@types/babel__traverse": "*" - } - }, - "node_modules/@types/babel__core/node_modules/@babel/helper-string-parser": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", - "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@types/babel__core/node_modules/@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@types/babel__core/node_modules/@babel/parser": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", - "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", - "dev": true, - "bin": { - "parser": "bin/babel-parser.js" + "@swc/counter": "^0.1.2", + "@swc/types": "^0.1.5" }, "engines": { - "node": ">=6.0.0" - } - }, - 
"node_modules/@types/babel__core/node_modules/@babel/types": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", - "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", - "dev": true, - "dependencies": { - "@babel/helper-string-parser": "^7.23.4", - "@babel/helper-validator-identifier": "^7.22.20", - "to-fast-properties": "^2.0.0" + "node": ">=10" }, - "engines": { - "node": ">=6.9.0" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.4.2", + "@swc/core-darwin-x64": "1.4.2", + "@swc/core-linux-arm-gnueabihf": "1.4.2", + "@swc/core-linux-arm64-gnu": "1.4.2", + "@swc/core-linux-arm64-musl": "1.4.2", + "@swc/core-linux-x64-gnu": "1.4.2", + "@swc/core-linux-x64-musl": "1.4.2", + "@swc/core-win32-arm64-msvc": "1.4.2", + "@swc/core-win32-ia32-msvc": "1.4.2", + "@swc/core-win32-x64-msvc": "1.4.2" + }, + "peerDependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } } }, - "node_modules/@types/babel__core/node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "node_modules/@swc/core-darwin-arm64": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.4.2.tgz", + "integrity": "sha512-1uSdAn1MRK5C1m/TvLZ2RDvr0zLvochgrZ2xL+lRzugLlCTlSA+Q4TWtrZaOz+vnnFVliCpw7c7qu0JouhgQIw==", + "cpu": [ + "arm64" + ], "dev": true, + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">=4" + "node": ">=10" } }, - "node_modules/@types/babel__generator": { + "node_modules/@swc/core-darwin-x64": { + "version": "1.4.2", + "resolved": 
"https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.4.2.tgz", + "integrity": "sha512-TYD28+dCQKeuxxcy7gLJUCFLqrwDZnHtC2z7cdeGfZpbI2mbfppfTf2wUPzqZk3gEC96zHd4Yr37V3Tvzar+lQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.4.2.tgz", + "integrity": "sha512-Eyqipf7ZPGj0vplKHo8JUOoU1un2sg5PjJMpEesX0k+6HKE2T8pdyeyXODN0YTFqzndSa/J43EEPXm+rHAsLFQ==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.4.2.tgz", + "integrity": "sha512-wZn02DH8VYPv3FC0ub4my52Rttsus/rFw+UUfzdb3tHMHXB66LqN+rR0ssIOZrH6K+VLN6qpTw9VizjyoH0BxA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.4.2.tgz", + "integrity": "sha512-3G0D5z9hUj9bXNcwmA1eGiFTwe5rWkuL3DsoviTj73TKLpk7u64ND0XjEfO0huVv4vVu9H1jodrKb7nvln/dlw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.4.2.tgz", + "integrity": "sha512-LFxn9U8cjmYHw3jrdPNqPAkBGglKE3tCZ8rA7hYyp0BFxuo7L2ZcEnPm4RFpmSCCsExFH+LEJWuMGgWERoktvg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + 
"node_modules/@swc/core-linux-x64-musl": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.4.2.tgz", + "integrity": "sha512-dp0fAmreeVVYTUcb4u9njTPrYzKnbIH0EhH2qvC9GOYNNREUu2GezSIDgonjOXkHiTCvopG4xU7y56XtXj4VrQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.4.2.tgz", + "integrity": "sha512-HlVIiLMQkzthAdqMslQhDkoXJ5+AOLUSTV6fm6shFKZKqc/9cJvr4S8UveNERL9zUficA36yM3bbfo36McwnvQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.4.2.tgz", + "integrity": "sha512-WCF8faPGjCl4oIgugkp+kL9nl3nUATlzKXCEGFowMEmVVCFM0GsqlmGdPp1pjZoWc9tpYanoXQDnp5IvlDSLhA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.4.2.tgz", + "integrity": "sha512-oV71rwiSpA5xre2C5570BhCsg1HF97SNLsZ/12xv7zayGzqr3yvFALFJN8tHKpqUdCB4FGPjoP3JFdV3i+1wUw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", "dev": true }, - "node_modules/@types/babel__template": { + "node_modules/@swc/types": { + "version": "0.1.5", + 
"resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.5.tgz", + "integrity": "sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==", "dev": true }, - "node_modules/@types/babel__traverse": { + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", "dev": true }, "node_modules/@types/prop-types": { - "devOptional": true + "version": "15.7.11", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz", + "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==", + "dev": true }, "node_modules/@types/react": { - "version": "18.2.56", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.56.tgz", - "integrity": "sha512-NpwHDMkS/EFZF2dONFQHgkPRwhvgq/OAvIaGQzxGSBmaeR++kTg6njr15Vatz0/2VcCEwJQFi6Jf4Q0qBu0rLA==", - "devOptional": true, + "version": "18.2.57", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.57.tgz", + "integrity": "sha512-ZvQsktJgSYrQiMirAN60y4O/LRevIV8hUzSOSNB6gfR3/o3wCBFQx3sPwIYtuDMeiVgsSS3UzCV26tEzgnfvQw==", + "dev": true, "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", @@ -915,7 +987,16 @@ } }, "node_modules/@types/scheduler": { - "devOptional": true + "version": "0.16.8", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.8.tgz", + "integrity": "sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==", + "dev": true + }, + 
"node_modules/@types/semver": { + "version": "7.5.7", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.7.tgz", + "integrity": "sha512-/wdoPq1QqkSj9/QOeKkFquEuPzQbHTWAMPH/PaUMB+JuR31lXhlWXRZ52IpfDYVlDOUBvX09uBrPwxGT1hjNBg==", + "dev": true }, "node_modules/@typescript-eslint/eslint-plugin": { "version": "6.21.0", @@ -980,136 +1061,6 @@ } } }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", - "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", - "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "9.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser/node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - 
"engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/@typescript-eslint/parser/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "dev": true, - "dependencies": { - 
"brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/@typescript-eslint/scope-manager": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", @@ -1127,19 +1078,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/scope-manager/node_modules/@typescript-eslint/types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", - "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/type-utils": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", @@ -1167,7 +1105,7 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "node_modules/@typescript-eslint/types": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", 
"integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", @@ -1180,7 +1118,7 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "node_modules/@typescript-eslint/typescript-estree": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", @@ -1208,95 +1146,6 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "dependencies": { - "path-type": 
"^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "dev": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/type-utils/node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/@typescript-eslint/utils": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", @@ -1322,175 +1171,6 @@ "eslint": "^7.0.0 || ^8.0.0" } }, - 
"node_modules/@typescript-eslint/utils/node_modules/@eslint-community/eslint-utils": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", - "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", - "dev": true, - "dependencies": { - "eslint-visitor-keys": "^3.3.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@types/json-schema": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true - }, - "node_modules/@typescript-eslint/utils/node_modules/@types/semver": { - "version": "7.5.7", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.7.tgz", - "integrity": "sha512-/wdoPq1QqkSj9/QOeKkFquEuPzQbHTWAMPH/PaUMB+JuR31lXhlWXRZ52IpfDYVlDOUBvX09uBrPwxGT1hjNBg==", - "dev": true - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", - "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", - "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", - "dev": 
true, - "dependencies": { - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "9.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/utils/node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/@typescript-eslint/utils/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/eslint-visitor-keys": { - "version": "3.4.3", 
- "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", - "dev": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "dev": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", 
- "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/@typescript-eslint/visitor-keys": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", @@ -1508,59 +1188,113 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/visitor-keys/node_modules/@typescript-eslint/types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", - "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true }, - "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", - "dev": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/@vitejs/plugin-react": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.2.1.tgz", - "integrity": "sha512-oojO9IDc4nCUUi8qIR11KoQm0XFFLIwsRBwHRR4d/88IWghn1y6ckz/bJ8GHDCsYEJee8mDzqtJxh15/cisJNQ==", + "node_modules/@vitejs/plugin-react-swc": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.6.0.tgz", + 
"integrity": "sha512-XFRbsGgpGxGzEV5i5+vRiro1bwcIaZDIdBRP16qwm+jP68ue/S8FJTBEgOeojtVDYrbSua3XFp71kC8VJE6v+g==", "dev": true, "dependencies": { - "@babel/core": "^7.23.5", - "@babel/plugin-transform-react-jsx-self": "^7.23.3", - "@babel/plugin-transform-react-jsx-source": "^7.23.3", - "@types/babel__core": "^7.20.5", - "react-refresh": "^0.14.0" - }, - "engines": { - "node": "^14.18.0 || >=16.0.0" + "@swc/core": "^1.3.107" }, "peerDependencies": { - "vite": "^4.2.0 || ^5.0.0" + "vite": "^4 || ^5" } }, - "node_modules/aria-hidden": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.3.tgz", - "integrity": "sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==", - "dependencies": { - "tslib": "^2.0.0" + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" }, "engines": { - "node": ">=10" + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + 
"version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" } }, "node_modules/braces": { @@ -1575,40 +1309,74 @@ "node": ">=8" } }, - "node_modules/camelcase-css": { - "version": "2.0.1", - 
"resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/clsx": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", - "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", "engines": { "node": ">=6" } }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, - "bin": { - "cssesc": "bin/cssesc" + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" }, "engines": { - "node": ">=4" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" } }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", - "devOptional": true + "dev": true }, "node_modules/debug": { "version": "4.3.4", @@ -1627,16 +1395,85 @@ } } }, - "node_modules/debug/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true }, - "node_modules/detect-node-es": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", - "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + 
"integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/esbuild": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + "@esbuild/linux-ia32": "0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": 
"sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/eslint": { "version": "8.56.0", @@ -1714,278 +1551,7 @@ "eslint": ">=7" } }, - "node_modules/eslint/node_modules/@aashutoshrathi/word-wrap": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", - "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint/node_modules/@eslint-community/eslint-utils": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", - "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", - "dev": true, - "dependencies": { - "eslint-visitor-keys": "^3.3.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" - } - }, - "node_modules/eslint/node_modules/@eslint/eslintrc": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", - "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", - "dev": true, - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.6.0", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint/node_modules/@eslint/js": { - "version": "8.56.0", - "resolved": 
"https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", - "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", - "dev": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - } - }, - "node_modules/eslint/node_modules/@humanwhocodes/config-array": { - "version": "0.11.14", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", - "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", - "dev": true, - "dependencies": { - "@humanwhocodes/object-schema": "^2.0.2", - "debug": "^4.3.1", - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=10.10.0" - } - }, - "node_modules/eslint/node_modules/@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", - "dev": true, - "engines": { - "node": ">=12.22" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, - "node_modules/eslint/node_modules/@humanwhocodes/object-schema": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", - "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", - "dev": true - }, - "node_modules/eslint/node_modules/@ungap/structured-clone": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", - "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", - "dev": true - }, - "node_modules/eslint/node_modules/acorn": { - "version": "8.11.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", - "integrity": 
"sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", - "dev": true, - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/eslint/node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "dev": true, - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/eslint/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/eslint/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/eslint/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/eslint/node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true - }, - "node_modules/eslint/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/eslint/node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/eslint/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/eslint/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - 
"node_modules/eslint/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/eslint/node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true - }, - "node_modules/eslint/node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/eslint/node_modules/deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true - }, - "node_modules/eslint/node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/eslint/node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - 
"url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/eslint-scope": { + "node_modules/eslint-scope": { "version": "7.2.2", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", @@ -2001,7 +1567,7 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/eslint-visitor-keys": { + "node_modules/eslint-visitor-keys": { "version": "3.4.3", "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", @@ -2013,7 +1579,29 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/espree": { + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { "version": "9.6.1", "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", @@ -2030,7 +1618,7 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/esquery": { + "node_modules/esquery": { 
"version": "1.5.0", "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", @@ -2042,7 +1630,7 @@ "node": ">=0.10" } }, - "node_modules/eslint/node_modules/esrecurse": { + "node_modules/esrecurse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", @@ -2054,7 +1642,7 @@ "node": ">=4.0" } }, - "node_modules/eslint/node_modules/estraverse": { + "node_modules/estraverse": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", @@ -2063,7 +1651,7 @@ "node": ">=4.0" } }, - "node_modules/eslint/node_modules/esutils": { + "node_modules/esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", @@ -2072,25 +1660,62 @@ "node": ">=0.10.0" } }, - "node_modules/eslint/node_modules/fast-deep-equal": { + "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "dev": true }, - "node_modules/eslint/node_modules/fast-json-stable-stringify": { + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + 
"merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true }, - "node_modules/eslint/node_modules/fast-levenshtein": { + "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", "dev": true }, - "node_modules/eslint/node_modules/file-entry-cache": { + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", @@ -2102,7 +1727,19 @@ "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/eslint/node_modules/find-up": { + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": 
"sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", @@ -2118,7 +1755,7 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/flat-cache": { + "node_modules/flat-cache": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", @@ -2132,19 +1769,33 @@ "node": "^10.12.0 || >=12.0.0" } }, - "node_modules/eslint/node_modules/flatted": { - "version": "3.2.9", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.9.tgz", - "integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==", + "node_modules/flatted": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.0.tgz", + "integrity": "sha512-noqGuLw158+DuD9UPRKHpJ2hGxpFyDlYYrfM0mWt4XhT4n0lwzTLh70Tkdyy4kyTmyTT9Bv7bWAJqw7cgkEXDg==", "dev": true }, - "node_modules/eslint/node_modules/fs.realpath": { + "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, - "node_modules/eslint/node_modules/glob": { + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + 
"hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", @@ -2164,7 +1815,7 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/eslint/node_modules/glob-parent": { + "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", @@ -2176,7 +1827,29 @@ "node": ">=10.13.0" } }, - "node_modules/eslint/node_modules/globals": { + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", @@ -2191,7 +1864,33 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/has-flag": { + "node_modules/globby": { + "version": "11.1.0", + "resolved": 
"https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", @@ -2200,7 +1899,16 @@ "node": ">=8" } }, - "node_modules/eslint/node_modules/import-fresh": { + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", @@ -2216,7 +1924,7 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/imurmurhash": { + "node_modules/imurmurhash": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", @@ -2225,7 +1933,7 @@ "node": ">=0.8.19" 
} }, - "node_modules/eslint/node_modules/inflight": { + "node_modules/inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", @@ -2235,458 +1943,12 @@ "wrappy": "1" } }, - "node_modules/eslint/node_modules/inherits": { + "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, - "node_modules/eslint/node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true - }, - "node_modules/eslint/node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/eslint/node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true - }, - "node_modules/eslint/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/eslint/node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true - }, - "node_modules/eslint/node_modules/keyv": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "dev": true, - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/eslint/node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - 
"dev": true - }, - "node_modules/eslint/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/eslint/node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/eslint/node_modules/optionator": { - "version": "0.9.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", - "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", - "dev": true, - "dependencies": { - "@aashutoshrathi/word-wrap": "^1.2.3", - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" 
- }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/eslint/node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint/node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": 
"sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/eslint/node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/eslint/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/eslint/node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/eslint/node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/eslint/node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true - }, - "node_modules/eslint/node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - 
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/eslint/node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/eslint/node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true - }, - "node_modules/eslint/node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/fast-glob": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", - "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", - "dev": true, - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fastq": { - "version": "1.17.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", - "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", - "dev": true, - "dependencies": { - "reusify": "^1.0.4" 
- } - }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/get-nonce": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", - "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", - "engines": { - "node": ">=6" - } - }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/graphemer": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", - "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", - "dev": true - }, - "node_modules/ignore": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", - "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", - "dev": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -2717,6 +1979,99 @@ "node": 
">=0.12.0" } }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": 
"sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", @@ -2728,10 +2083,17 @@ "loose-envify": "cli.js" } }, - "node_modules/loose-envify/node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } }, "node_modules/merge2": { "version": "1.4.1", @@ -2755,6 +2117,27 @@ "node": ">=8.6" } }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, "node_modules/nanoid": { "version": "3.3.7", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", @@ -2779,14 +2162,110 @@ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": 
"sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + 
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, "engines": { "node": ">=0.10.0" } }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/picocolors": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", @@ -2833,116 +2312,22 @@ "node": "^10 || ^12 || >=14" } }, - "node_modules/postcss-js": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", - "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", - "dev": true, - "dependencies": { - "camelcase-css": "^2.0.1" - }, - "engines": { - "node": "^12 || ^14 || >= 16" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.4.21" - } - }, - "node_modules/postcss-mixins": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/postcss-mixins/-/postcss-mixins-9.0.4.tgz", - "integrity": "sha512-XVq5jwQJDRu5M1XGkdpgASqLk37OqkH4JCFDXl/Dn7janOJjCTEKL+36cnRVy7bMtoBzALfO7bV7nTIsFnUWLA==", - "dev": true, - "dependencies": { - "fast-glob": "^3.2.11", - "postcss-js": "^4.0.0", - "postcss-simple-vars": "^7.0.0", - "sugarss": "^4.0.1" - }, - "engines": { - "node": ">=14.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - 
"peerDependencies": { - "postcss": "^8.2.14" - } - }, - "node_modules/postcss-nested": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", - "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", - "dev": true, - "dependencies": { - "postcss-selector-parser": "^6.0.11" - }, - "engines": { - "node": ">=12.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.2.14" - } - }, - "node_modules/postcss-preset-mantine": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/postcss-preset-mantine/-/postcss-preset-mantine-1.13.0.tgz", - "integrity": "sha512-1bv/mQz2K+/FixIMxYd83BYH7PusDZaI7LpUtKbb1l/5N5w6t1p/V9ONHfRJeeAZyfa6Xc+AtR+95VKdFXRH1g==", - "dev": true, - "dependencies": { - "postcss-mixins": "^9.0.4", - "postcss-nested": "^6.0.1" - }, - "peerDependencies": { - "postcss": ">=8.0.0" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.0.15", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.15.tgz", - "integrity": "sha512-rEYkQOMUCEMhsKbK66tbEU9QVIxbhN18YiniAwA7XQYTVBqrBy+P2p5JcdqsHgKM2zWylp8d7J6eszocfds5Sw==", - "dev": true, - "dependencies": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-simple-vars": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-simple-vars/-/postcss-simple-vars-7.0.1.tgz", - "integrity": "sha512-5GLLXaS8qmzHMOjVxqkk1TZPf1jMqesiI7qLhnlyERalG0sMbHIbJqrcnrpmZdKCLglHnRHoEBB61RtGTsj++A==", + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true, "engines": { - "node": ">=14.0" - 
}, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.2.1" + "node": ">= 0.8.0" } }, - "node_modules/prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "dependencies": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" } }, "node_modules/queue-microtask": { @@ -2988,120 +2373,15 @@ "react": "^18.2.0" } }, - "node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" - }, - "node_modules/react-number-format": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.3.1.tgz", - "integrity": "sha512-qpYcQLauIeEhCZUZY9jXZnnroOtdy3jYaS1zQ3M1Sr6r/KMOBEIGNIb7eKT19g2N1wbYgFgvDzs19hw5TrB8XQ==", - "dependencies": { - "prop-types": "^15.7.2" - }, - "peerDependencies": { - "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/react-refresh": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", - "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, "engines": { - "node": ">=0.10.0" + "node": ">=4" } }, - "node_modules/react-remove-scroll": { - "version": "2.5.7", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz", - "integrity": "sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==", - "dependencies": { - "react-remove-scroll-bar": "^2.3.4", - "react-style-singleton": "^2.2.1", - "tslib": "^2.1.0", - "use-callback-ref": "^1.3.0", - "use-sidecar": "^1.1.2" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/react-remove-scroll-bar": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.4.tgz", - "integrity": "sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==", - "dependencies": { - "react-style-singleton": "^2.2.1", - "tslib": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/react-style-singleton": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", - "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", - "dependencies": { - "get-nonce": "^1.0.0", - "invariant": "^2.2.4", - "tslib": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "@types/react": 
"^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/react-textarea-autosize": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", - "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", - "dependencies": { - "@babel/runtime": "^7.20.13", - "use-composed-ref": "^1.3.0", - "use-latest": "^1.2.1" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" - }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -3112,6 +2392,53 @@ "node": ">=0.10.0" } }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.0.tgz", + "integrity": "sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==", + "dev": true, + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.12.0", + 
"@rollup/rollup-android-arm64": "4.12.0", + "@rollup/rollup-darwin-arm64": "4.12.0", + "@rollup/rollup-darwin-x64": "4.12.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.0", + "@rollup/rollup-linux-arm64-gnu": "4.12.0", + "@rollup/rollup-linux-arm64-musl": "4.12.0", + "@rollup/rollup-linux-riscv64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-musl": "4.12.0", + "@rollup/rollup-win32-arm64-msvc": "4.12.0", + "@rollup/rollup-win32-ia32-msvc": "4.12.0", + "@rollup/rollup-win32-x64-msvc": "4.12.0", + "fsevents": "~2.3.2" + } + }, "node_modules/run-parallel": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", @@ -3158,23 +2485,35 @@ "node": ">=10" } }, - "node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, "dependencies": { - "yallist": "^4.0.0" + "shebang-regex": "^3.0.0" }, "engines": { - "node": ">=10" + "node": ">=8" } }, - "node_modules/semver/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } }, "node_modules/source-map-js": { "version": "1.0.2", @@ -3185,26 +2524,47 @@ "node": ">=0.10.0" } }, - "node_modules/sugarss": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/sugarss/-/sugarss-4.0.1.tgz", - "integrity": "sha512-WCjS5NfuVJjkQzK10s8WOBY+hhDxxNt/N6ZaGwxFZ+wN3/lKKFSaaKUNecULcTTvE4urLcKaZFQD8vO0mOZujw==", + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, "engines": { - "node": ">=12.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - "peerDependencies": { - "postcss": "^8.3.3" + "node": ">=8" } }, - "node_modules/tabbable": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", - "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true }, "node_modules/to-regex-range": { "version": "5.0.1", @@ -3230,17 +2590,25 @@ "typescript": ">=4.2.0" } }, - "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } }, "node_modules/type-fest": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-3.13.1.tgz", - "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==", + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, "engines": { - "node": ">=14.16" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -3259,90 +2627,15 @@ "node": ">=14.17" } }, - "node_modules/use-callback-ref": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.1.tgz", - "integrity": 
"sha512-Lg4Vx1XZQauB42Hw3kK7JM6yjVjgFmFC5/Ab797s79aARomD2nEErc4mCgM8EZrARLmmbWpi5DGCadmK50DcAQ==", + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, "dependencies": { - "tslib": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } + "punycode": "^2.1.0" } }, - "node_modules/use-composed-ref": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", - "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - } - }, - "node_modules/use-isomorphic-layout-effect": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", - "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/use-latest": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", - "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", - "dependencies": { - "use-isomorphic-layout-effect": "^1.1.1" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/use-sidecar": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", - "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", - "dependencies": { - "detect-node-es": "^1.1.0", - "tslib": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "peerDependencies": { - "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - } - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true - }, "node_modules/vite": { "version": "5.1.3", "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.3.tgz", @@ -3398,631 +2691,43 @@ } } }, - "node_modules/vite/node_modules/@esbuild/aix-ppc64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", - "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/android-arm": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", - "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/android-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", - "integrity": 
"sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/android-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", - "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/darwin-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", - "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/darwin-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", - "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", - "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/freebsd-x64": { - "version": "0.19.12", - "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", - "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-arm": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", - "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", - "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-ia32": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", - "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-loong64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", - "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", - "cpu": [ - "loong64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - 
"node_modules/vite/node_modules/@esbuild/linux-mips64el": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", - "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", - "cpu": [ - "mips64el" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-ppc64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", - "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-riscv64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", - "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", - "cpu": [ - "riscv64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-s390x": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", - "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", - "cpu": [ - "s390x" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", - "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", - "cpu": [ - "x64" - ], - "dev": true, 
- "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/netbsd-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", - "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/openbsd-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", - "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/sunos-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", - "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", - "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-ia32": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", - "integrity": 
"sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", - "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/vite/node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz", - "integrity": "sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-android-arm64": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz", - "integrity": "sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz", - "integrity": "sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-darwin-x64": { - "version": "4.12.0", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz", - "integrity": "sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz", - "integrity": "sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz", - "integrity": "sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz", - "integrity": "sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz", - "integrity": "sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==", - "cpu": [ - "riscv64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ] - }, - 
"node_modules/vite/node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz", - "integrity": "sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz", - "integrity": "sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz", - "integrity": "sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz", - "integrity": "sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/vite/node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz", - "integrity": "sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - 
"win32" - ] - }, - "node_modules/vite/node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", - "dev": true - }, - "node_modules/vite/node_modules/esbuild": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", - "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", - "dev": true, - "hasInstallScript": true, - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.19.12", - "@esbuild/android-arm": "0.19.12", - "@esbuild/android-arm64": "0.19.12", - "@esbuild/android-x64": "0.19.12", - "@esbuild/darwin-arm64": "0.19.12", - "@esbuild/darwin-x64": "0.19.12", - "@esbuild/freebsd-arm64": "0.19.12", - "@esbuild/freebsd-x64": "0.19.12", - "@esbuild/linux-arm": "0.19.12", - "@esbuild/linux-arm64": "0.19.12", - "@esbuild/linux-ia32": "0.19.12", - "@esbuild/linux-loong64": "0.19.12", - "@esbuild/linux-mips64el": "0.19.12", - "@esbuild/linux-ppc64": "0.19.12", - "@esbuild/linux-riscv64": "0.19.12", - "@esbuild/linux-s390x": "0.19.12", - "@esbuild/linux-x64": "0.19.12", - "@esbuild/netbsd-x64": "0.19.12", - "@esbuild/openbsd-x64": "0.19.12", - "@esbuild/sunos-x64": "0.19.12", - "@esbuild/win32-arm64": "0.19.12", - "@esbuild/win32-ia32": "0.19.12", - "@esbuild/win32-x64": "0.19.12" - } - }, - "node_modules/vite/node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - 
"node_modules/vite/node_modules/rollup": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.0.tgz", - "integrity": "sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==", + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "dependencies": { - "@types/estree": "1.0.5" + "isexe": "^2.0.0" }, "bin": { - "rollup": "dist/bin/rollup" + "node-which": "bin/node-which" }, "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" + "node": ">= 8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.12.0", - "@rollup/rollup-android-arm64": "4.12.0", - "@rollup/rollup-darwin-arm64": "4.12.0", - "@rollup/rollup-darwin-x64": "4.12.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.12.0", - "@rollup/rollup-linux-arm64-gnu": "4.12.0", - "@rollup/rollup-linux-arm64-musl": "4.12.0", - "@rollup/rollup-linux-riscv64-gnu": "4.12.0", - "@rollup/rollup-linux-x64-gnu": "4.12.0", - "@rollup/rollup-linux-x64-musl": 
"4.12.0", - "@rollup/rollup-win32-arm64-msvc": "4.12.0", - "@rollup/rollup-win32-ia32-msvc": "4.12.0", - "@rollup/rollup-win32-x64-msvc": "4.12.0", - "fsevents": "~2.3.2" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } } } diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index d64d2b1e44..0d2711ff05 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,13 +1,14 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.0.0", + "version": "0.49.1", "type": "module", "scripts": { - "dev": "vite", + "start": "vite", "build": "tsc && vite build", "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", - "preview": "vite preview" + "preview": "vite preview", + "test": "echo TODO: Create tests for the new Mantine UI" }, "dependencies": { "@codemirror/autocomplete": "^6.12.0", @@ -31,7 +32,6 @@ "react-router-dom": "^6.22.1" }, "devDependencies": { - "@types/eslint": "~8.56.2", "@types/react": "^18.2.55", "@types/react-dom": "^18.2.19", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -43,7 +43,6 @@ "postcss": "^8.4.35", "postcss-preset-mantine": "^1.13.0", "postcss-simple-vars": "^7.0.1", - "typescript": "^5.2.2", "vite": "^5.1.0" } } diff --git a/web/ui/module/codemirror-promql/src/client/prometheus.ts b/web/ui/module/codemirror-promql/src/client/prometheus.ts index 873cbb0d22..8d57defbff 100644 --- a/web/ui/module/codemirror-promql/src/client/prometheus.ts +++ b/web/ui/module/codemirror-promql/src/client/prometheus.ts @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import { FetchFn } from '.'; +import { FetchFn } from './index'; import { Matcher } from '../types'; import { labelMatchersToString } from '../parser'; import LRUCache from 'lru-cache'; diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index a6bb3f3eb8..fc033b9060 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -32,7 +32,8 @@ "devDependencies": { "@lezer/generator": "^1.5.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.3.14" + "@lezer/lr": "^1.3.14", + "@rollup/plugin-node-resolve": "^15.2.3" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 91a85656e2..ef8b467a9d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -12,26 +12,101 @@ "module/*" ], "devDependencies": { - "@rollup/plugin-node-resolve": "^15.2.3", - "@types/jest": "^29.5.11", - "@types/node": "^20.10.4", - "eslint-config-prettier": "^8.10.0", - "eslint-config-react-app": "^7.0.1", - "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.5.2", - "jest-fetch-mock": "^3.0.3", - "prettier": "^2.8.8", - "rollup": "^4.12.0", - "ts-jest": "^29.1.1", - "typescript": "^5.3.3" + "@types/jest": "^29.5.12", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "ts-jest": "^29.1.2", + "typescript": "^5.2.2", + "vite": "^5.1.0" + } + }, + "app": { + "name": "@prometheus-io/app", + "version": "0.0.0", + "extraneous": true, + "dependencies": { + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.2.1", + "@lezer/highlight": "^1.2.0", + "@mantine/code-highlight": "^7.5.3", + "@mantine/core": "^7.5.3", + "@mantine/dates": "^7.5.3", + "@mantine/hooks": "^7.5.3", + 
"@prometheus-io/codemirror-promql": "0.49.1", + "@tabler/icons-react": "^2.47.0", + "@tanstack/react-query": "^5.22.2", + "@uiw/react-codemirror": "^4.21.22", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-router-dom": "^6.22.1" + }, + "devDependencies": { + "@types/react": "^18.2.55", + "@types/react-dom": "^18.2.19", + "@vitejs/plugin-react-swc": "^3.5.0", + "eslint": "^8.56.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5" + } + }, + "codemirror-promql": { + "name": "@prometheus-io/codemirror-promql", + "version": "0.49.1", + "extraneous": true, + "license": "Apache-2.0", + "dependencies": { + "@prometheus-io/lezer-promql": "0.49.1", + "lru-cache": "^7.18.3" + }, + "devDependencies": { + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", + "isomorphic-fetch": "^3.0.0", + "nock": "^13.4.0" }, "engines": { - "npm": ">=7.0.0" + "node": ">=12.0.0" + }, + "peerDependencies": { + "@codemirror/autocomplete": "^6.4.0", + "@codemirror/language": "^6.3.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/state": "^6.1.1", + "@codemirror/view": "^6.4.0", + "@lezer/common": "^1.0.1" + } + }, + "lezer-promql": { + "name": "@prometheus-io/lezer-promql", + "version": "0.49.1", + "extraneous": true, + "license": "Apache-2.0", + "devDependencies": { + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", + "@rollup/plugin-node-resolve": "^15.2.3" + }, + "peerDependencies": { + "@lezer/highlight": "^1.1.2", + "@lezer/lr": "^1.2.3" } }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.0.0", + "version": "0.49.1", "dependencies": { "@codemirror/autocomplete": "^6.12.0", "@codemirror/language": "^6.10.1", @@ -54,7 +129,6 @@ "react-router-dom": "^6.22.1" }, "devDependencies": { - 
"@types/eslint": "~8.56.2", "@types/react": "^18.2.55", "@types/react-dom": "^18.2.19", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -66,66 +140,9 @@ "postcss": "^8.4.35", "postcss-preset-mantine": "^1.13.0", "postcss-simple-vars": "^7.0.1", - "typescript": "^5.2.2", "vite": "^5.1.0" } }, - "mantine-ui/node_modules/@mantine/code-highlight": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.5.3.tgz", - "integrity": "sha512-TLZSkVAfX3KH9XKjJl965KX6TjpMKtNzObjI6Uvo/J/5Rvqhe7xbhBPJDT7yhSD+wjnTMsEWEb68rmQa3M/cEA==", - "dependencies": { - "clsx": "2.0.0", - "highlight.js": "^11.9.0" - }, - "peerDependencies": { - "@mantine/core": "7.5.3", - "@mantine/hooks": "7.5.3", - "react": "^18.2.0", - "react-dom": "^18.2.0" - } - }, - "mantine-ui/node_modules/@mantine/core": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.5.3.tgz", - "integrity": "sha512-Wvv6DJXI+GX9mmKG5HITTh/24sCZ0RoYQHdTHh0tOfGnEy+RleyhA82UjnMsp0n2NjfCISBwbiKgfya6b2iaFw==", - "dependencies": { - "@floating-ui/react": "^0.24.8", - "clsx": "2.0.0", - "react-number-format": "^5.3.1", - "react-remove-scroll": "^2.5.7", - "react-textarea-autosize": "8.5.3", - "type-fest": "^3.13.1" - }, - "peerDependencies": { - "@mantine/hooks": "7.5.3", - "react": "^18.2.0", - "react-dom": "^18.2.0" - } - }, - "mantine-ui/node_modules/@mantine/dates": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.5.3.tgz", - "integrity": "sha512-v6fFdW+7HAd7XsZFMJVMuFE2RHbQAVnsUNeP0/5h+H4qEj0soTmMvHPP8wXEed5v85r9CcEMGOGq1n6RFRpWHA==", - "dependencies": { - "clsx": "2.0.0" - }, - "peerDependencies": { - "@mantine/core": "7.5.3", - "@mantine/hooks": "7.5.3", - "dayjs": ">=1.0.0", - "react": "^18.2.0", - "react-dom": "^18.2.0" - } - }, - "mantine-ui/node_modules/@mantine/hooks": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.5.3.tgz", - "integrity": 
"sha512-mFI448mAs12v8FrgSVhytqlhTVrEjIfd/PqPEfwJu5YcZIq4YZdqpzJIUbANnRrFSvmoQpDb1PssdKx7Ds35hw==", - "peerDependencies": { - "react": "^18.2.0" - } - }, "mantine-ui/node_modules/@prometheus-io/codemirror-promql": { "version": "0.50.0-rc.1", "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.50.0-rc.1.tgz", @@ -155,240 +172,6 @@ "@lezer/lr": "^1.2.3" } }, - "mantine-ui/node_modules/@tanstack/react-query": { - "version": "5.22.2", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.22.2.tgz", - "integrity": "sha512-TaxJDRzJ8/NWRT4lY2jguKCrNI6MRN+67dELzPjNUlvqzTxGANlMp68l7aC7hG8Bd1uHNxHl7ihv7MT50i/43A==", - "dependencies": { - "@tanstack/query-core": "5.22.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": "^18.0.0" - } - }, - "mantine-ui/node_modules/@types/react": { - "version": "18.2.57", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.57.tgz", - "integrity": "sha512-ZvQsktJgSYrQiMirAN60y4O/LRevIV8hUzSOSNB6gfR3/o3wCBFQx3sPwIYtuDMeiVgsSS3UzCV26tEzgnfvQw==", - "dev": true, - "dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, - "mantine-ui/node_modules/@types/react-dom": { - "version": "18.2.19", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", - "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", - "dev": true, - "dependencies": { - "@types/react": "*" - } - }, - "mantine-ui/node_modules/@typescript-eslint/eslint-plugin": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", - "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", - "dev": true, - "dependencies": { - "@eslint-community/regexpp": "^4.5.1", - 
"@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/type-utils": "6.21.0", - "@typescript-eslint/utils": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", - "debug": "^4.3.4", - "graphemer": "^1.4.0", - "ignore": "^5.2.4", - "natural-compare": "^1.4.0", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "mantine-ui/node_modules/@typescript-eslint/parser": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", - "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/typescript-estree": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", - "debug": "^4.3.4" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "mantine-ui/node_modules/@typescript-eslint/scope-manager": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", - "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - 
"funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "mantine-ui/node_modules/@typescript-eslint/type-utils": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", - "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", - "dev": true, - "dependencies": { - "@typescript-eslint/typescript-estree": "6.21.0", - "@typescript-eslint/utils": "6.21.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "mantine-ui/node_modules/@typescript-eslint/types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", - "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "mantine-ui/node_modules/@typescript-eslint/typescript-estree": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", - "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "minimatch": "9.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": 
"opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "mantine-ui/node_modules/@typescript-eslint/utils": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", - "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", - "dev": true, - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@types/json-schema": "^7.0.12", - "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/typescript-estree": "6.21.0", - "semver": "^7.5.4" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - } - }, - "mantine-ui/node_modules/@typescript-eslint/visitor-keys": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", - "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.21.0", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "mantine-ui/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0" - } - }, "mantine-ui/node_modules/lru-cache": { "version": "7.18.3", "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", @@ -397,120 +180,6 @@ "node": ">=12" } }, - "mantine-ui/node_modules/minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "dev": true, - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "mantine-ui/node_modules/react": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", - "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", - "dependencies": { - "loose-envify": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "mantine-ui/node_modules/react-dom": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", - "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", - "dependencies": { - "loose-envify": "^1.1.0", - "scheduler": "^0.23.0" - }, - "peerDependencies": { - "react": "^18.2.0" - } - }, - "mantine-ui/node_modules/react-router": { - "version": "6.22.1", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.22.1.tgz", - "integrity": "sha512-0pdoRGwLtemnJqn1K0XHUbnKiX0S4X8CgvVVmHGOWmofESj31msHo/1YiqcJWK7Wxfq2a4uvvtS01KAQyWK/CQ==", - "dependencies": { - "@remix-run/router": "1.15.1" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "react": ">=16.8" - } - }, - "mantine-ui/node_modules/react-router-dom": { - "version": "6.22.1", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.22.1.tgz", - "integrity": "sha512-iwMyyyrbL7zkKY7MRjOVRy+TMnS/OPusaFVxM2P11x9dzSzGmLsebkCvYirGq0DWB9K9hOspHYYtDz33gE5Duw==", - "dependencies": { - 
"@remix-run/router": "1.15.1", - "react-router": "6.22.1" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "react": ">=16.8", - "react-dom": ">=16.8" - } - }, - "mantine-ui/node_modules/scheduler": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", - "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", - "dependencies": { - "loose-envify": "^1.1.0" - } - }, - "mantine-ui/node_modules/semver": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", - "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "mantine-ui/node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "mantine-ui/node_modules/type-fest": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-3.13.1.tgz", - "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==", - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.49.1", @@ -545,7 +214,8 @@ }, "module/codemirror-promql/node_modules/lru-cache": { "version": "7.18.3", - "license": "ISC", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": 
"sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", "engines": { "node": ">=12" } @@ -557,7 +227,8 @@ "devDependencies": { "@lezer/generator": "^1.5.1", "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.3.14" + "@lezer/lr": "^1.3.14", + "@rollup/plugin-node-resolve": "^15.2.3" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -574,11 +245,12 @@ } }, "node_modules/@ampproject/remapping": { - "version": "2.2.0", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", "dev": true, - "license": "Apache-2.0", "dependencies": { - "@jridgewell/gen-mapping": "^0.1.0", + "@jridgewell/gen-mapping": "^0.3.0", "@jridgewell/trace-mapping": "^0.3.9" }, "engines": { @@ -600,8 +272,9 @@ }, "node_modules/@babel/code-frame/node_modules/ansi-styles": { "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dev": true, - "license": "MIT", "dependencies": { "color-convert": "^1.9.0" }, @@ -611,8 +284,9 @@ }, "node_modules/@babel/code-frame/node_modules/chalk": { "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "dev": true, - "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", @@ -624,37 +298,42 @@ }, "node_modules/@babel/code-frame/node_modules/color-convert": { "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, - "license": "MIT", 
"dependencies": { "color-name": "1.1.3" } }, "node_modules/@babel/code-frame/node_modules/color-name": { "version": "1.1.3", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true }, "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.8.0" } }, "node_modules/@babel/code-frame/node_modules/has-flag": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/@babel/code-frame/node_modules/supports-color": { "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, - "license": "MIT", "dependencies": { "has-flag": "^3.0.0" }, @@ -701,35 +380,13 @@ "url": "https://opencollective.com/babel" } }, - "node_modules/@babel/core/node_modules/convert-source-map": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", - "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", - "dev": true - }, - "node_modules/@babel/eslint-parser": { - "version": "7.19.1", + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "license": "MIT", - "dependencies": { - "@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", - "eslint-visitor-keys": "^2.1.0", - "semver": "^6.3.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || >=14.0.0" - }, - "peerDependencies": { - "@babel/core": ">=7.11.0", - "eslint": "^7.5.0 || ^8.0.0" - } - }, - "node_modules/@babel/eslint-parser/node_modules/eslint-visitor-keys": { - "version": "2.1.0", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=10" + "bin": { + "semver": "bin/semver.js" } }, "node_modules/@babel/generator": { @@ -747,42 +404,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/generator/node_modules/@jridgewell/gen-mapping": { - "version": "0.3.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.18.6", - "@babel/types": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-compilation-targets": { "version": "7.23.6", "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", @@ -808,86 +429,35 @@ "yallist": "^3.0.2" } }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-member-expression-to-functions": "^7.18.9", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-replace-supers": "^7.18.9", - "@babel/helper-split-export-declaration": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "regexpu-core": "^5.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - }, - "peerDependencies": { - "@babel/core": "^7.4.0-0" - } - }, "node_modules/@babel/helper-environment-visitor": { "version": "7.22.20", + "resolved": 
"https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-function-name": { "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/template": "^7.22.15", "@babel/types": "^7.23.0" @@ -898,8 +468,9 @@ }, "node_modules/@babel/helper-hoist-variables": { "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/types": "^7.22.5" }, @@ -907,17 +478,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-module-imports": { "version": "7.22.15", "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", @@ -949,17 +509,6 @@ "@babel/core": "^7.0.0" } }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - 
}, "node_modules/@babel/helper-plugin-utils": { "version": "7.22.5", "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", @@ -969,38 +518,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-wrap-function": "^7.18.9", - "@babel/types": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.19.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-member-expression-to-functions": "^7.18.9", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/traverse": "^7.19.1", - "@babel/types": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-simple-access": { "version": "7.22.5", "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", @@ -1013,21 +530,11 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-split-export-declaration": { "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, - "license": "MIT", "dependencies": { "@babel/types": "^7.22.5" }, @@ -1046,8 +553,9 @@ }, "node_modules/@babel/helper-validator-identifier": { "version": "7.22.20", + "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -1061,20 +569,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-function-name": "^7.19.0", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.19.0", - "@babel/types": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helpers": { "version": "7.23.9", "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", @@ -1186,292 +680,12 @@ "node": ">=6.0.0" } }, - "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", - "@babel/plugin-proposal-optional-chaining": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - "version": "7.19.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-remap-async-to-generator": "^7.18.9", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - 
"@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-static-block": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-proposal-decorators": { - "version": "7.19.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.19.0", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-replace-supers": "^7.19.1", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/plugin-syntax-decorators": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-json-strings": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.18.8", - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.18.8" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.18.6", - "dev": true, - "license": "MIT", 
- "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-syntax-async-generators": { "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + 
"integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1481,8 +695,9 @@ }, "node_modules/@babel/plugin-syntax-bigint": { "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" @@ -1493,8 +708,10 @@ }, "node_modules/@babel/plugin-syntax-class-properties": { "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.12.13" }, @@ -1502,88 +719,11 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-decorators": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-flow": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-syntax-import-meta": { "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" @@ -1594,8 +734,10 @@ }, "node_modules/@babel/plugin-syntax-json-strings": { "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1604,11 +746,13 @@ } }, "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.18.6", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz", + "integrity": "sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg==", "dev": true, - 
"license": "MIT", + "peer": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -1619,8 +763,10 @@ }, "node_modules/@babel/plugin-syntax-logical-assignment-operators": { "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1630,8 +776,10 @@ }, "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1641,8 +789,10 @@ }, "node_modules/@babel/plugin-syntax-numeric-separator": { "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1652,8 +802,10 @@ }, "node_modules/@babel/plugin-syntax-object-rest-spread": { "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", "dev": true, - "license": "MIT", + "peer": true, 
"dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1663,8 +815,10 @@ }, "node_modules/@babel/plugin-syntax-optional-catch-binding": { "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1674,8 +828,10 @@ }, "node_modules/@babel/plugin-syntax-optional-chaining": { "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1683,24 +839,12 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-syntax-top-level-await": { "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -1712,428 +856,13 @@ } }, "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.18.6", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz", + 
"integrity": "sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-remap-async-to-generator": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-compilation-targets": "^7.19.0", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-plugin-utils": "^7.19.0", - 
"@babel/helper-replace-supers": "^7.18.9", - "@babel/helper-split-export-declaration": "^7.18.6", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-classes/node_modules/globals": { - "version": "11.12.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.18.13", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-flow-strip-types": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/plugin-syntax-flow": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.18.8", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-function-name": "^7.18.9", - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - 
"version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-simple-access": "^7.18.6", - "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-module-transforms": "^7.19.0", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-validator-identifier": "^7.18.6", - "babel-plugin-dynamic-import-node": "^2.3.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.19.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.19.0", - "@babel/helper-plugin-utils": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - 
"@babel/helper-replace-supers": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.18.8", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/plugin-syntax-jsx": "^7.18.6", - "@babel/types": "^7.19.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.18.6" + "@babel/helper-plugin-utils": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -2172,323 +901,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-plugin-utils": 
"^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "regenerator-transform": "^0.15.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.19.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.19.0", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.19.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - 
"@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.18.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-typescript": { - "version": "7.19.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.19.0", - "@babel/helper-plugin-utils": "^7.19.0", - "@babel/plugin-syntax-typescript": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.18.10", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.19.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.19.3", - "@babel/helper-compilation-targets": "^7.19.3", - "@babel/helper-plugin-utils": "^7.19.0", - 
"@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.18.9", - "@babel/plugin-proposal-async-generator-functions": "^7.19.1", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-class-static-block": "^7.18.6", - "@babel/plugin-proposal-dynamic-import": "^7.18.6", - "@babel/plugin-proposal-export-namespace-from": "^7.18.9", - "@babel/plugin-proposal-json-strings": "^7.18.6", - "@babel/plugin-proposal-logical-assignment-operators": "^7.18.9", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", - "@babel/plugin-proposal-numeric-separator": "^7.18.6", - "@babel/plugin-proposal-object-rest-spread": "^7.18.9", - "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", - "@babel/plugin-proposal-optional-chaining": "^7.18.9", - "@babel/plugin-proposal-private-methods": "^7.18.6", - "@babel/plugin-proposal-private-property-in-object": "^7.18.6", - "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.18.6", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - 
"@babel/plugin-transform-arrow-functions": "^7.18.6", - "@babel/plugin-transform-async-to-generator": "^7.18.6", - "@babel/plugin-transform-block-scoped-functions": "^7.18.6", - "@babel/plugin-transform-block-scoping": "^7.18.9", - "@babel/plugin-transform-classes": "^7.19.0", - "@babel/plugin-transform-computed-properties": "^7.18.9", - "@babel/plugin-transform-destructuring": "^7.18.13", - "@babel/plugin-transform-dotall-regex": "^7.18.6", - "@babel/plugin-transform-duplicate-keys": "^7.18.9", - "@babel/plugin-transform-exponentiation-operator": "^7.18.6", - "@babel/plugin-transform-for-of": "^7.18.8", - "@babel/plugin-transform-function-name": "^7.18.9", - "@babel/plugin-transform-literals": "^7.18.9", - "@babel/plugin-transform-member-expression-literals": "^7.18.6", - "@babel/plugin-transform-modules-amd": "^7.18.6", - "@babel/plugin-transform-modules-commonjs": "^7.18.6", - "@babel/plugin-transform-modules-systemjs": "^7.19.0", - "@babel/plugin-transform-modules-umd": "^7.18.6", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.19.1", - "@babel/plugin-transform-new-target": "^7.18.6", - "@babel/plugin-transform-object-super": "^7.18.6", - "@babel/plugin-transform-parameters": "^7.18.8", - "@babel/plugin-transform-property-literals": "^7.18.6", - "@babel/plugin-transform-regenerator": "^7.18.6", - "@babel/plugin-transform-reserved-words": "^7.18.6", - "@babel/plugin-transform-shorthand-properties": "^7.18.6", - "@babel/plugin-transform-spread": "^7.19.0", - "@babel/plugin-transform-sticky-regex": "^7.18.6", - "@babel/plugin-transform-template-literals": "^7.18.9", - "@babel/plugin-transform-typeof-symbol": "^7.18.9", - "@babel/plugin-transform-unicode-escapes": "^7.18.10", - "@babel/plugin-transform-unicode-regex": "^7.18.6", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.19.3", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "core-js-compat": 
"^3.25.1", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-react": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-react-display-name": "^7.18.6", - "@babel/plugin-transform-react-jsx": "^7.18.6", - "@babel/plugin-transform-react-jsx-development": "^7.18.6", - "@babel/plugin-transform-react-pure-annotations": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/preset-typescript": { - "version": "7.18.6", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-typescript": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/runtime": { "version": "7.23.9", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.9.tgz", @@ -2500,23 +912,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/runtime-corejs3": { - "version": "7.19.1", - "dev": true, - "license": "MIT", - "dependencies": { - "core-js-pure": "^3.25.1", - "regenerator-runtime": "^0.13.4" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/runtime/node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": 
"https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" - }, "node_modules/@babel/template": { "version": "7.23.9", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", @@ -2554,8 +949,9 @@ }, "node_modules/@babel/traverse/node_modules/globals": { "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } @@ -2576,8 +972,9 @@ }, "node_modules/@bcoe/v8-coverage": { "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/@codemirror/autocomplete": { @@ -2598,11 +995,12 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.3.2", - "license": "MIT", + "version": "6.3.3", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.3.tgz", + "integrity": "sha512-dO4hcF0fGT9tu1Pj1D2PvGvxjeGkbC6RGcZw6Qs74TH+Ed1gw98jmUgd2axWvIZEqTeTuFrg1lEB1KV6cK9h1A==", "dependencies": { "@codemirror/language": "^6.0.0", - "@codemirror/state": "^6.2.0", + "@codemirror/state": "^6.4.0", "@codemirror/view": "^6.0.0", "@lezer/common": "^1.1.0" } @@ -2631,8 +1029,9 @@ } }, "node_modules/@codemirror/search": { - "version": "6.5.5", - "license": "MIT", + "version": "6.5.6", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.6.tgz", + "integrity": "sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -3080,6 +1479,28 @@ "url": 
"https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/@eslint/js": { "version": "8.56.0", "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", @@ -3151,10 +1572,33 @@ "node": ">=10.10.0" } }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": 
"sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=12.22" }, @@ -3171,8 +1615,9 @@ }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "camelcase": "^5.3.1", @@ -3187,8 +1632,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "sprintf-js": "~1.0.2" @@ -3196,8 +1642,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "locate-path": "^5.0.0", @@ -3209,8 +1656,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "argparse": "^1.0.7", @@ -3222,8 +1670,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": 
"sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "p-locate": "^4.1.0" @@ -3234,8 +1683,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "p-try": "^2.0.0" @@ -3249,8 +1699,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "p-limit": "^2.2.0" @@ -3261,8 +1712,9 @@ }, "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -3270,79 +1722,65 @@ }, "node_modules/@istanbuljs/schema": { "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8" } }, "node_modules/@jest/console": { - "version": "29.2.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", "dev": true, - "license": 
"MIT", "peer": true, "dependencies": { - "@jest/types": "^29.2.1", + "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", - "jest-message-util": "^29.2.1", - "jest-util": "^29.2.1", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", "slash": "^3.0.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@jest/console/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, "node_modules/@jest/core": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/console": "^29.2.1", - "@jest/reporters": "^29.2.2", - "@jest/test-result": "^29.2.1", - "@jest/transform": "^29.2.2", - "@jest/types": "^29.2.1", + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", "@types/node": "*", "ansi-escapes": "^4.2.1", "chalk": "^4.0.0", "ci-info": "^3.2.0", "exit": "^0.1.2", "graceful-fs": "^4.2.9", - "jest-changed-files": "^29.2.0", - "jest-config": "^29.2.2", - "jest-haste-map": "^29.2.1", - "jest-message-util": "^29.2.1", - "jest-regex-util": "^29.2.0", - "jest-resolve": "^29.2.2", - "jest-resolve-dependencies": "^29.2.2", - "jest-runner": "^29.2.2", - "jest-runtime": "^29.2.2", - "jest-snapshot": "^29.2.2", - "jest-util": "^29.2.1", - "jest-validate": "^29.2.2", - "jest-watcher": "^29.2.2", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": 
"^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", "micromatch": "^4.0.4", - "pretty-format": "^29.2.1", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "strip-ansi": "^6.0.0" }, @@ -3358,285 +1796,95 @@ } } }, - "node_modules/@jest/core/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/@jest/core/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@jest/core/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/jest-haste-map": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "node_modules/@jest/core/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/jest-resolve": { - "version": "29.2.2", - 
"dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", - "jest-pnp-resolver": "^1.2.2", - "jest-util": "^29.2.1", - "jest-validate": "^29.2.2", - "resolve": "^1.20.0", - "resolve.exports": "^1.1.0", - "slash": "^3.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/jest-validate": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "camelcase": "^6.2.0", - "chalk": "^4.0.0", - "jest-get-type": "^29.2.0", - "leven": "^3.1.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/core/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/@jest/core/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - 
"license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/@jest/environment": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/fake-timers": "^29.2.2", - "@jest/types": "^29.2.1", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", "@types/node": "*", - "jest-mock": "^29.2.2" + "jest-mock": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/expect": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "expect": "^29.2.2", - "jest-snapshot": "^29.2.2" + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/expect-utils": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", "dev": true, - "license": "MIT", "dependencies": { - "jest-get-type": "^29.2.0" + "jest-get-type": "^29.6.3" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@jest/expect-utils/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, "node_modules/@jest/fake-timers": { 
- "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/types": "^29.2.1", - "@sinonjs/fake-timers": "^9.1.2", + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", "@types/node": "*", - "jest-message-util": "^29.2.1", - "jest-mock": "^29.2.2", - "jest-util": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/fake-timers/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/globals": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/environment": "^29.2.2", - "@jest/expect": "^29.2.2", - "@jest/types": "^29.2.1", - "jest-mock": "^29.2.2" + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/reporters": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", "dev": true, - 
"license": "MIT", "peer": true, "dependencies": { "@bcoe/v8-coverage": "^0.2.3", - "@jest/console": "^29.2.1", - "@jest/test-result": "^29.2.1", - "@jest/transform": "^29.2.2", - "@jest/types": "^29.2.1", - "@jridgewell/trace-mapping": "^0.3.15", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", "@types/node": "*", "chalk": "^4.0.0", "collect-v8-coverage": "^1.0.0", @@ -3644,13 +1892,13 @@ "glob": "^7.1.3", "graceful-fs": "^4.2.9", "istanbul-lib-coverage": "^3.0.0", - "istanbul-lib-instrument": "^5.1.0", + "istanbul-lib-instrument": "^6.0.0", "istanbul-lib-report": "^3.0.0", "istanbul-lib-source-maps": "^4.0.0", "istanbul-reports": "^3.1.3", - "jest-message-util": "^29.2.1", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", "slash": "^3.0.0", "string-length": "^4.0.1", "strip-ansi": "^6.0.0", @@ -3668,71 +1916,26 @@ } } }, - "node_modules/@jest/reporters/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/reporters/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/reporters/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/@jest/schemas": { - "version": "29.0.0", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", "dev": true, - "license": "MIT", "dependencies": { - "@sinclair/typebox": "^0.24.1" + "@sinclair/typebox": "^0.27.8" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/@jest/source-map": { - "version": "29.2.0", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jridgewell/trace-mapping": "^0.3.15", + "@jridgewell/trace-mapping": "^0.3.18", "callsites": "^3.0.0", "graceful-fs": "^4.2.9" }, @@ -3741,13 +1944,14 @@ } }, "node_modules/@jest/test-result": { - "version": "29.2.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/console": "^29.2.1", - "@jest/types": "^29.2.1", + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", "collect-v8-coverage": "^1.0.0" }, @@ -3756,214 +1960,55 @@ } }, "node_modules/@jest/test-sequencer": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/test-result": "^29.2.1", + 
"@jest/test-result": "^29.7.0", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", + "jest-haste-map": "^29.7.0", "slash": "^3.0.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@jest/test-sequencer/node_modules/jest-haste-map": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "node_modules/@jest/test-sequencer/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/test-sequencer/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/test-sequencer/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/test-sequencer/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/@jest/transform": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/core": "^7.11.6", - "@jest/types": "^29.2.1", - "@jridgewell/trace-mapping": "^0.3.15", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", "babel-plugin-istanbul": "^6.1.1", "chalk": "^4.0.0", - "convert-source-map": "^1.4.0", + "convert-source-map": "^2.0.0", "fast-json-stable-stringify": "^2.1.0", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", "micromatch": "^4.0.4", "pirates": "^4.0.4", "slash": "^3.0.0", - "write-file-atomic": "^4.0.1" + "write-file-atomic": "^4.0.2" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/@jest/transform/node_modules/jest-haste-map": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "node_modules/@jest/transform/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/transform/node_modules/jest-util": { - "version": "29.2.1", 
- "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/transform/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@jest/transform/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/@jest/types": { - "version": "29.2.1", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", "dev": true, - "license": "MIT", "dependencies": { - "@jest/schemas": "^29.0.0", + "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", "@types/node": "*", @@ -3975,50 +2020,10 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/set-array": "^1.0.0", - "@jridgewell/sourcemap-codec": "^1.4.10" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - 
"node_modules/@jridgewell/source-map": { - "version": "0.3.5", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" - } - }, - "node_modules/@jridgewell/source-map/node_modules/@jridgewell/gen-mapping": { "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", "dev": true, - "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", @@ -4028,15 +2033,35 @@ "node": ">=6.0.0" } }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "dev": true, - "license": "MIT" + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.19", + "version": "0.3.22", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", + 
"integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", "dev": true, - "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -4048,11 +2073,12 @@ "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==" }, "node_modules/@lezer/generator": { - "version": "1.5.1", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.6.0.tgz", + "integrity": "sha512-mDwFNchv4jBEoZBaZbr5GlKR7BM6W/ZanTOZN6p4avuzcmYHTZ0nIbwtBqvXoeBrqmFSvL2zHL5TX9FWkXKc2w==", "dev": true, - "license": "MIT", "dependencies": { - "@lezer/common": "^1.0.2", + "@lezer/common": "^1.1.0", "@lezer/lr": "^1.3.0" }, "bin": { @@ -4061,50 +2087,92 @@ }, "node_modules/@lezer/highlight": { "version": "1.2.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/lr": { - "version": "1.3.14", - "license": "MIT", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.0.tgz", + "integrity": "sha512-Wst46p51km8gH0ZUmeNrtpRYmdlRHUpN1DQd3GFAyKANi8WVz8c2jHYTf1CVScFaCjQw1iO3ZZdqGDxQPRErTg==", "dependencies": { "@lezer/common": "^1.0.0" } }, - "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { - "version": "5.1.1-v1", - "dev": true, - "license": "MIT", + "node_modules/@mantine/code-highlight": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.5.3.tgz", + "integrity": "sha512-TLZSkVAfX3KH9XKjJl965KX6TjpMKtNzObjI6Uvo/J/5Rvqhe7xbhBPJDT7yhSD+wjnTMsEWEb68rmQa3M/cEA==", "dependencies": { - "eslint-scope": "5.1.1" - } - }, - 
"node_modules/@nicolo-ribaudo/eslint-scope-5-internals/node_modules/eslint-scope": { - "version": "5.1.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" + "clsx": "2.0.0", + "highlight.js": "^11.9.0" }, - "engines": { - "node": ">=8.0.0" + "peerDependencies": { + "@mantine/core": "7.5.3", + "@mantine/hooks": "7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" } }, - "node_modules/@nicolo-ribaudo/eslint-scope-5-internals/node_modules/estraverse": { - "version": "4.3.0", - "dev": true, - "license": "BSD-2-Clause", + "node_modules/@mantine/core": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.5.3.tgz", + "integrity": "sha512-Wvv6DJXI+GX9mmKG5HITTh/24sCZ0RoYQHdTHh0tOfGnEy+RleyhA82UjnMsp0n2NjfCISBwbiKgfya6b2iaFw==", + "dependencies": { + "@floating-ui/react": "^0.24.8", + "clsx": "2.0.0", + "react-number-format": "^5.3.1", + "react-remove-scroll": "^2.5.7", + "react-textarea-autosize": "8.5.3", + "type-fest": "^3.13.1" + }, + "peerDependencies": { + "@mantine/hooks": "7.5.3", + "react": "^18.2.0", + "react-dom": "^18.2.0" + } + }, + "node_modules/@mantine/core/node_modules/type-fest": { + "version": "3.13.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-3.13.1.tgz", + "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==", "engines": { - "node": ">=4.0" + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@mantine/dates": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.5.3.tgz", + "integrity": "sha512-v6fFdW+7HAd7XsZFMJVMuFE2RHbQAVnsUNeP0/5h+H4qEj0soTmMvHPP8wXEed5v85r9CcEMGOGq1n6RFRpWHA==", + "dependencies": { + "clsx": "2.0.0" + }, + "peerDependencies": { + "@mantine/core": "7.5.3", + "@mantine/hooks": "7.5.3", + "dayjs": ">=1.0.0", + "react": "^18.2.0", + "react-dom": "^18.2.0" 
+ } + }, + "node_modules/@mantine/hooks": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.5.3.tgz", + "integrity": "sha512-mFI448mAs12v8FrgSVhytqlhTVrEjIfd/PqPEfwJu5YcZIq4YZdqpzJIUbANnRrFSvmoQpDb1PssdKx7Ds35hw==", + "peerDependencies": { + "react": "^18.2.0" } }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, - "license": "MIT", "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" @@ -4115,16 +2183,18 @@ }, "node_modules/@nodelib/fs.stat": { "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true, - "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/@nodelib/fs.walk": { "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dev": true, - "license": "MIT", "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" @@ -4369,32 +2439,30 @@ "win32" ] }, - "node_modules/@rushstack/eslint-patch": { - "version": "1.2.0", - "dev": true, - "license": "MIT" - }, "node_modules/@sinclair/typebox": { - "version": "0.24.51", - "dev": true, - "license": "MIT" + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true }, "node_modules/@sinonjs/commons": { - "version": "1.8.3", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + 
"integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { "type-detect": "4.0.8" } }, "node_modules/@sinonjs/fake-timers": { - "version": "9.1.2", + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { - "@sinonjs/commons": "^1.7.0" + "@sinonjs/commons": "^3.0.0" } }, "node_modules/@tabler/icons": { @@ -4431,6 +2499,21 @@ "url": "https://github.com/sponsors/tannerlinsley" } }, + "node_modules/@tanstack/react-query": { + "version": "5.22.2", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.22.2.tgz", + "integrity": "sha512-TaxJDRzJ8/NWRT4lY2jguKCrNI6MRN+67dELzPjNUlvqzTxGANlMp68l7aC7hG8Bd1uHNxHl7ihv7MT50i/43A==", + "dependencies": { + "@tanstack/query-core": "5.22.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^18.0.0" + } + }, "node_modules/@types/babel__core": { "version": "7.20.5", "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", @@ -4445,38 +2528,31 @@ } }, "node_modules/@types/babel__generator": { - "version": "7.6.4", + "version": "7.6.8", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz", + "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==", "dev": true, - "license": "MIT", "dependencies": { "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__template": { - "version": "7.4.1", + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": 
"sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", "dev": true, - "license": "MIT", "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "node_modules/@types/babel__traverse": { - "version": "7.18.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/types": "^7.3.0" - } - }, - "node_modules/@types/eslint": { - "version": "8.56.2", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.2.tgz", - "integrity": "sha512-uQDwm1wFHmbBbCZCqAlq6Do9LYwByNZHWzXppSnay9SuwJ+VRbjkbLABer54kcPnMSlG6Fdiy2yaFXm/z9Z5gw==", + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.5.tgz", + "integrity": "sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ==", "dev": true, "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" + "@babel/types": "^7.20.7" } }, "node_modules/@types/estree": { @@ -4486,119 +2562,90 @@ "dev": true }, "node_modules/@types/graceful-fs": { - "version": "4.1.5", + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.4", - "dev": true, - "license": "MIT" + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true }, "node_modules/@types/istanbul-lib-report": { - "version": "3.0.0", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": 
"sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", "dev": true, - "license": "MIT", "dependencies": { "@types/istanbul-lib-coverage": "*" } }, "node_modules/@types/istanbul-reports": { - "version": "3.0.1", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", "dev": true, - "license": "MIT", "dependencies": { "@types/istanbul-lib-report": "*" } }, "node_modules/@types/jest": { - "version": "29.5.11", + "version": "29.5.12", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.12.tgz", + "integrity": "sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==", "dev": true, - "license": "MIT", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" } }, - "node_modules/@types/jest/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/@types/jest/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/@types/jest/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT" - }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", "dev": true }, - "node_modules/@types/json5": { - "version": "0.0.29", - "dev": true, - "license": "MIT" - }, "node_modules/@types/node": { - "version": 
"20.10.4", + "version": "20.11.19", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.19.tgz", + "integrity": "sha512-7xMnVEcZFu0DikYjWOlRq7NTPETrm7teqUT2WkQjrTIkEgUyyGdWsj/Zg8bEJt5TNklzbPD1X3fqfsHw3SpapQ==", "dev": true, - "license": "MIT", "dependencies": { "undici-types": "~5.26.4" } }, - "node_modules/@types/parse-json": { - "version": "4.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/prettier": { - "version": "2.7.1", - "dev": true, - "license": "MIT", - "peer": true - }, "node_modules/@types/prop-types": { - "version": "15.7.5", - "devOptional": true, - "license": "MIT" + "version": "15.7.11", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz", + "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==", + "devOptional": true }, "node_modules/@types/react": { - "version": "17.0.71", - "license": "MIT", - "optional": true, - "peer": true, + "version": "18.2.57", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.57.tgz", + "integrity": "sha512-ZvQsktJgSYrQiMirAN60y4O/LRevIV8hUzSOSNB6gfR3/o3wCBFQx3sPwIYtuDMeiVgsSS3UzCV26tEzgnfvQw==", + "devOptional": true, "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", "csstype": "^3.0.2" } }, + "node_modules/@types/react-dom": { + "version": "18.2.19", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", + "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, "node_modules/@types/resolve": { "version": "1.20.2", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", @@ -4606,9 +2653,10 @@ "dev": true }, "node_modules/@types/scheduler": { - "version": "0.16.2", - "devOptional": true, - "license": "MIT" + "version": "0.16.8", + "resolved": 
"https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.8.tgz", + "integrity": "sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==", + "devOptional": true }, "node_modules/@types/semver": { "version": "7.5.7", @@ -4617,47 +2665,54 @@ "dev": true }, "node_modules/@types/stack-utils": { - "version": "2.0.1", - "dev": true, - "license": "MIT" + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true }, "node_modules/@types/yargs": { - "version": "17.0.13", + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", "dev": true, - "license": "MIT", "dependencies": { "@types/yargs-parser": "*" } }, "node_modules/@types/yargs-parser": { - "version": "21.0.0", - "dev": true, - "license": "MIT" + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/scope-manager": "5.38.1", - "@typescript-eslint/type-utils": "5.38.1", - "@typescript-eslint/utils": "5.38.1", + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + 
"@typescript-eslint/visitor-keys": "6.21.0", "debug": "^4.3.4", - "ignore": "^5.2.0", - "regexpp": "^3.2.0", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^5.0.0", - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" }, "peerDependenciesMeta": { "typescript": { @@ -4665,57 +2720,27 @@ } } }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@typescript-eslint/experimental-utils": { - "version": "5.38.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/utils": "5.38.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, "node_modules/@typescript-eslint/parser": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/scope-manager": "5.38.1", - "@typescript-eslint/types": "5.38.1", - "@typescript-eslint/typescript-estree": "5.38.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": 
"6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", "debug": "^4.3.4" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + "eslint": "^7.0.0 || ^8.0.0" }, "peerDependenciesMeta": { "typescript": { @@ -4724,15 +2749,16 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/types": "5.38.1", - "@typescript-eslint/visitor-keys": "5.38.1" + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", @@ -4740,24 +2766,25 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "5.38.1", - "@typescript-eslint/utils": "5.38.1", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", "debug": "^4.3.4", - "tsutils": "^3.21.0" + "ts-api-utils": "^1.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, 
"peerDependencies": { - "eslint": "*" + "eslint": "^7.0.0 || ^8.0.0" }, "peerDependenciesMeta": { "typescript": { @@ -4766,11 +2793,12 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", "dev": true, - "license": "MIT", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", @@ -4778,20 +2806,22 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/types": "5.38.1", - "@typescript-eslint/visitor-keys": "5.38.1", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", @@ -4803,73 +2833,42 @@ } } }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/@typescript-eslint/utils": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": 
"sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", "dev": true, - "license": "MIT", "dependencies": { - "@types/json-schema": "^7.0.9", - "@typescript-eslint/scope-manager": "5.38.1", - "@typescript-eslint/types": "5.38.1", - "@typescript-eslint/typescript-estree": "5.38.1", - "eslint-scope": "^5.1.1", - "eslint-utils": "^3.0.0" + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/eslint-scope": { - "version": "5.1.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/estraverse": { - "version": "4.3.0", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=4.0" + "eslint": "^7.0.0 || ^8.0.0" } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.38.1", + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", "dev": true, - "license": "MIT", "dependencies": { - "@typescript-eslint/types": "5.38.1", - "eslint-visitor-keys": "^3.3.0" + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^16.0.0 || >=18.0.0" }, "funding": { 
"type": "opencollective", @@ -4952,19 +2951,11 @@ "vite": "^4.2.0 || ^5.0.0" } }, - "node_modules/@vitejs/plugin-react/node_modules/react-refresh": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", - "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/acorn": { - "version": "8.10.0", + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true, - "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -4983,8 +2974,9 @@ }, "node_modules/ajv": { "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, - "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -4998,8 +2990,9 @@ }, "node_modules/ansi-escapes": { "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "type-fest": "^0.21.3" @@ -5013,8 +3006,9 @@ }, "node_modules/ansi-escapes/node_modules/type-fest": { "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", "dev": true, - "license": "(MIT OR CC0-1.0)", "peer": true, "engines": { "node": ">=10" @@ -5025,16 +3019,18 @@ }, "node_modules/ansi-regex": { "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/ansi-styles": { "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, - "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, @@ -5046,9 +3042,10 @@ } }, "node_modules/anymatch": { - "version": "3.1.2", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "normalize-path": "^3.0.0", @@ -5075,106 +3072,26 @@ "node": ">=10" } }, - "node_modules/aria-query": { - "version": "4.2.2", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@babel/runtime": "^7.10.2", - "@babel/runtime-corejs3": "^7.10.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/array-includes": { - "version": "3.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.19.5", - "get-intrinsic": "^1.1.1", - "is-string": "^1.0.7" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/array-union": { "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, - "node_modules/array.prototype.flat": { - "version": "1.3.0", - "dev": true, - "license": "MIT", - 
"dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.2", - "es-shim-unscopables": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/array.prototype.flatmap": { - "version": "1.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.2", - "es-shim-unscopables": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/ast-types-flow": { - "version": "0.0.7", - "dev": true, - "license": "ISC" - }, - "node_modules/axe-core": { - "version": "4.4.3", - "dev": true, - "license": "MPL-2.0", - "engines": { - "node": ">=4" - } - }, - "node_modules/axobject-query": { - "version": "2.2.0", - "dev": true, - "license": "Apache-2.0" - }, "node_modules/babel-jest": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/transform": "^29.2.2", + "@jest/transform": "^29.7.0", "@types/babel__core": "^7.1.14", "babel-plugin-istanbul": "^6.1.1", - "babel-preset-jest": "^29.2.0", + "babel-preset-jest": "^29.6.3", "chalk": "^4.0.0", "graceful-fs": "^4.2.9", "slash": "^3.0.0" @@ -5186,18 +3103,11 @@ "@babel/core": "^7.8.0" } }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "object.assign": "^4.1.0" - } - }, "node_modules/babel-plugin-istanbul": { "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": 
"sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.0.0", @@ -5210,10 +3120,38 @@ "node": ">=8" } }, - "node_modules/babel-plugin-jest-hoist": { - "version": "29.2.0", + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "peer": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "peer": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/template": "^7.3.3", @@ -5225,65 +3163,11 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/babel-plugin-macros": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.12.5", - "cosmiconfig": "^7.0.0", - "resolve": "^1.19.0" - }, - "engines": { - "node": ">=10", - "npm": ">=6" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - 
"version": "0.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.3.3", - "semver": "^6.1.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.6.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.3", - "core-js-compat": "^3.25.1" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.4.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.3" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/babel-plugin-transform-react-remove-prop-types": { - "version": "0.4.24", - "dev": true, - "license": "MIT" - }, "node_modules/babel-preset-current-node-syntax": { "version": "1.0.1", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", + "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/plugin-syntax-async-generators": "^7.8.4", @@ -5304,12 +3188,13 @@ } }, "node_modules/babel-preset-jest": { - "version": "29.2.0", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "babel-plugin-jest-hoist": "^29.2.0", + "babel-plugin-jest-hoist": "^29.6.3", "babel-preset-current-node-syntax": "^1.0.0" }, "engines": { @@ -5319,57 +3204,26 @@ "@babel/core": "^7.0.0" } }, - "node_modules/babel-preset-react-app": { - "version": "10.0.1", - "dev": true, - 
"license": "MIT", - "dependencies": { - "@babel/core": "^7.16.0", - "@babel/plugin-proposal-class-properties": "^7.16.0", - "@babel/plugin-proposal-decorators": "^7.16.4", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0", - "@babel/plugin-proposal-numeric-separator": "^7.16.0", - "@babel/plugin-proposal-optional-chaining": "^7.16.0", - "@babel/plugin-proposal-private-methods": "^7.16.0", - "@babel/plugin-transform-flow-strip-types": "^7.16.0", - "@babel/plugin-transform-react-display-name": "^7.16.0", - "@babel/plugin-transform-runtime": "^7.16.4", - "@babel/preset-env": "^7.16.4", - "@babel/preset-react": "^7.16.0", - "@babel/preset-typescript": "^7.16.0", - "@babel/runtime": "^7.16.3", - "babel-plugin-macros": "^3.1.0", - "babel-plugin-transform-react-remove-prop-types": "^0.4.24" - } - }, "node_modules/balanced-match": { "version": "1.0.2", - "dev": true, - "license": "MIT" - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": ">=8" - } + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true }, "node_modules/brace-expansion": { - "version": "1.1.11", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "dev": true, - "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^1.0.0" } }, "node_modules/braces": { "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", "dev": true, - "license": "MIT", "dependencies": { 
"fill-range": "^7.0.1" }, @@ -5411,8 +3265,9 @@ }, "node_modules/bs-logger": { "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", "dev": true, - "license": "MIT", "dependencies": { "fast-json-stable-stringify": "2.x" }, @@ -5422,8 +3277,9 @@ }, "node_modules/bser": { "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { "node-int64": "^0.4.0" @@ -5431,8 +3287,9 @@ }, "node_modules/buffer-from": { "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/builtin-modules": { @@ -5447,30 +3304,20 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/call-bind": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/callsites": { "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/camelcase": { "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", "dev": true, - "license": "MIT", "peer": true, "engines": { 
"node": ">=6" @@ -5478,8 +3325,9 @@ }, "node_modules/camelcase-css": { "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", "dev": true, - "license": "MIT", "engines": { "node": ">= 6" } @@ -5506,8 +3354,9 @@ }, "node_modules/chalk": { "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, - "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -5521,69 +3370,41 @@ }, "node_modules/char-regex": { "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=10" } }, - "node_modules/chokidar": { - "version": "3.5.3", + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", "dev": true, "funding": [ { - "type": "individual", - "url": "https://paulmillr.com/funding/" + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" } ], - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" + "node": ">=8" } }, - "node_modules/chokidar/node_modules/glob-parent": { - "version": "5.1.2", - "dev": true, - "license": "ISC", - 
"optional": true, - "peer": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/ci-info": { - "version": "3.4.0", - "dev": true, - "license": "MIT" - }, "node_modules/cjs-module-lexer": { - "version": "1.2.2", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz", + "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/cliui": { "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "string-width": "^4.2.0", @@ -5604,8 +3425,9 @@ }, "node_modules/co": { "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", "dev": true, - "license": "MIT", "peer": true, "engines": { "iojs": ">= 1.0.0", @@ -5627,15 +3449,17 @@ } }, "node_modules/collect-v8-coverage": { - "version": "1.0.1", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/color-convert": { "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dev": true, - "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, @@ -5645,81 +3469,54 @@ }, "node_modules/color-name": { "version": "1.1.4", - "dev": true, - "license": "MIT" + 
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true }, "node_modules/concat-map": { "version": "0.0.1", - "dev": true, - "license": "MIT" - }, - "node_modules/confusing-browser-globals": { - "version": "1.0.11", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true }, "node_modules/convert-source-map": { - "version": "1.8.0", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "safe-buffer": "~5.1.1" - } - }, - "node_modules/core-js-compat": { - "version": "3.25.4", - "dev": true, - "license": "MIT", - "dependencies": { - "browserslist": "^4.21.4" + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/core-js-pure": { - "version": "3.25.4", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/core-js" - } - }, - "node_modules/cosmiconfig": { - "version": "7.0.1", - "dev": true, - "license": "MIT", - 
"dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" + "bin": { + "create-jest": "bin/create-jest.js" }, "engines": { - "node": ">=10" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/crelt": { - "version": "1.0.5", - "license": "MIT" - }, - "node_modules/cross-fetch": { - "version": "3.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "node-fetch": "2.6.7" - } + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", + "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==" }, "node_modules/cross-spawn": { "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", "dev": true, - "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -5731,8 +3528,9 @@ }, "node_modules/cssesc": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", "dev": true, - "license": "MIT", "bin": { "cssesc": "bin/cssesc" }, @@ -5740,20 +3538,11 @@ "node": ">=4" } }, - "node_modules/cssfontparser": { - "version": "1.2.1", - "dev": true, - "license": "MIT" - }, "node_modules/csstype": { - "version": "3.1.1", - "devOptional": true, - "license": "MIT" - }, - "node_modules/damerau-levenshtein": { - "version": "1.0.8", - "dev": true, - "license": "BSD-2-Clause" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "devOptional": true }, "node_modules/dayjs": { "version": "1.11.10", @@ -5762,8 +3551,9 
@@ }, "node_modules/debug": { "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, - "license": "MIT", "dependencies": { "ms": "2.1.2" }, @@ -5777,43 +3567,40 @@ } }, "node_modules/dedent": { - "version": "0.7.0", + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", + "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==", "dev": true, - "license": "MIT", - "peer": true + "peer": true, + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } }, "node_modules/deep-is": { "version": "0.1.4", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true }, "node_modules/deepmerge": { - "version": "4.2.2", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } }, - "node_modules/define-properties": { - "version": "1.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/detect-newline": { "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", "dev": true, - "license": "MIT", "peer": 
true, "engines": { "node": ">=8" @@ -5824,10 +3611,20 @@ "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, "node_modules/dir-glob": { "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", "dev": true, - "license": "MIT", "dependencies": { "path-type": "^4.0.0" }, @@ -5837,8 +3634,9 @@ }, "node_modules/doctrine": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, - "license": "Apache-2.0", "dependencies": { "esutils": "^2.0.2" }, @@ -5847,15 +3645,16 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.677", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.677.tgz", - "integrity": "sha512-erDa3CaDzwJOpyvfKhOiJjBVNnMM0qxHq47RheVVwsSQrgBA9ZSGV9kdaOfZDPXcHzhG7lBxhj6A7KvfLJBd6Q==", + "version": "1.4.678", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.678.tgz", + "integrity": "sha512-NbdGC2p0O5Q5iVhLEsNBSfytaw7wbEFJlIvaF71wi6QDtLAph5/rVogjyOpf/QggJIt8hNK3KdwNJnc2bzckbw==", "dev": true }, "node_modules/emittery": { "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": 
"sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=12" @@ -5865,79 +3664,22 @@ } }, "node_modules/emoji-regex": { - "version": "9.2.2", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true, - "license": "MIT" + "peer": true }, "node_modules/error-ex": { "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "is-arrayish": "^0.2.1" } }, - "node_modules/es-abstract": { - "version": "1.20.3", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.1.3", - "get-symbol-description": "^1.0.0", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "is-callable": "^1.2.6", - "is-negative-zero": "^2.0.2", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "is-string": "^1.0.7", - "is-weakref": "^1.0.2", - "object-inspect": "^1.12.2", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trimend": "^1.0.5", - "string.prototype.trimstart": "^1.0.5", - "unbox-primitive": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/es-shim-unscopables": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "has": "^1.0.3" - } - }, - "node_modules/es-to-primitive": 
{ - "version": "1.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/esbuild": { "version": "0.19.12", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", @@ -5977,17 +3719,19 @@ } }, "node_modules/escalade": { - "version": "3.1.1", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/escape-string-regexp": { "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, @@ -6050,252 +3794,11 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint-config-prettier": { - "version": "8.10.0", - "dev": true, - "license": "MIT", - "bin": { - "eslint-config-prettier": "bin/cli.js" - }, - "peerDependencies": { - "eslint": ">=7.0.0" - } - }, - "node_modules/eslint-config-react-app": { - "version": "7.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/core": "^7.16.0", - "@babel/eslint-parser": "^7.16.3", - "@rushstack/eslint-patch": "^1.1.0", - "@typescript-eslint/eslint-plugin": "^5.5.0", - "@typescript-eslint/parser": "^5.5.0", - "babel-preset-react-app": "^10.0.1", - "confusing-browser-globals": "^1.0.11", - "eslint-plugin-flowtype": "^8.0.3", - "eslint-plugin-import": "^2.25.3", - "eslint-plugin-jest": "^25.3.0", - "eslint-plugin-jsx-a11y": "^6.5.1", - "eslint-plugin-react": "^7.27.1", - "eslint-plugin-react-hooks": "^4.3.0", - 
"eslint-plugin-testing-library": "^5.0.1" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "eslint": "^8.0.0" - } - }, - "node_modules/eslint-import-resolver-node": { - "version": "0.3.6", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^3.2.7", - "resolve": "^1.20.0" - } - }, - "node_modules/eslint-import-resolver-node/node_modules/debug": { - "version": "3.2.7", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/eslint-module-utils": { - "version": "2.7.4", - "dev": true, - "license": "MIT", - "dependencies": { - "debug": "^3.2.7" - }, - "engines": { - "node": ">=4" - }, - "peerDependenciesMeta": { - "eslint": { - "optional": true - } - } - }, - "node_modules/eslint-module-utils/node_modules/debug": { - "version": "3.2.7", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/eslint-plugin-flowtype": { - "version": "8.0.3", - "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "lodash": "^4.17.21", - "string-natural-compare": "^3.0.1" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@babel/plugin-syntax-flow": "^7.14.5", - "@babel/plugin-transform-react-jsx": "^7.14.9", - "eslint": "^8.1.0" - } - }, - "node_modules/eslint-plugin-import": { - "version": "2.26.0", - "dev": true, - "license": "MIT", - "dependencies": { - "array-includes": "^3.1.4", - "array.prototype.flat": "^1.2.5", - "debug": "^2.6.9", - "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.6", - "eslint-module-utils": "^2.7.3", - "has": "^1.0.3", - "is-core-module": "^2.8.1", - "is-glob": "^4.0.3", - "minimatch": "^3.1.2", - "object.values": "^1.1.5", - "resolve": "^1.22.0", - "tsconfig-paths": "^3.14.1" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" - } - }, - "node_modules/eslint-plugin-import/node_modules/debug": { - "version": "2.6.9", - "dev": 
true, - "license": "MIT", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/eslint-plugin-import/node_modules/doctrine": { - "version": "2.1.0", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint-plugin-import/node_modules/ms": { - "version": "2.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/eslint-plugin-jest": { - "version": "25.7.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/experimental-utils": "^5.0.0" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" - }, - "peerDependencies": { - "@typescript-eslint/eslint-plugin": "^4.0.0 || ^5.0.0", - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "@typescript-eslint/eslint-plugin": { - "optional": true - }, - "jest": { - "optional": true - } - } - }, - "node_modules/eslint-plugin-jsx-a11y": { - "version": "6.6.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/runtime": "^7.18.9", - "aria-query": "^4.2.2", - "array-includes": "^3.1.5", - "ast-types-flow": "^0.0.7", - "axe-core": "^4.4.3", - "axobject-query": "^2.2.0", - "damerau-levenshtein": "^1.0.8", - "emoji-regex": "^9.2.2", - "has": "^1.0.3", - "jsx-ast-utils": "^3.3.2", - "language-tags": "^1.0.5", - "minimatch": "^3.1.2", - "semver": "^6.3.0" - }, - "engines": { - "node": ">=4.0" - }, - "peerDependencies": { - "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" - } - }, - "node_modules/eslint-plugin-prettier": { - "version": "4.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "prettier-linter-helpers": "^1.0.0" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "eslint": ">=7.28.0", - "prettier": ">=2.0.0" - }, - "peerDependenciesMeta": { - "eslint-config-prettier": { - "optional": true - } - } - }, - "node_modules/eslint-plugin-react": { - "version": "7.31.8", - "dev": true, - "license": "MIT", - "dependencies": { 
- "array-includes": "^3.1.5", - "array.prototype.flatmap": "^1.3.0", - "doctrine": "^2.1.0", - "estraverse": "^5.3.0", - "jsx-ast-utils": "^2.4.1 || ^3.0.0", - "minimatch": "^3.1.2", - "object.entries": "^1.1.5", - "object.fromentries": "^2.0.5", - "object.hasown": "^1.1.1", - "object.values": "^1.1.5", - "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.3", - "semver": "^6.3.0", - "string.prototype.matchall": "^4.0.7" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" - } - }, "node_modules/eslint-plugin-react-hooks": { "version": "4.6.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", + "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, @@ -6312,48 +3815,6 @@ "eslint": ">=7" } }, - "node_modules/eslint-plugin-react/node_modules/doctrine": { - "version": "2.1.0", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/eslint-plugin-react/node_modules/resolve": { - "version": "2.0.0-next.4", - "dev": true, - "license": "MIT", - "dependencies": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/eslint-plugin-testing-library": { - "version": "5.7.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/utils": "^5.13.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0", - "npm": ">=6" - }, - "peerDependencies": { - "eslint": "^7.5.0 || ^8.0.0" - } - }, "node_modules/eslint-scope": { "version": "7.2.2", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", @@ -6370,31 +3831,6 @@ "url": 
"https://opencollective.com/eslint" } }, - "node_modules/eslint-utils": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "eslint-visitor-keys": "^2.0.0" - }, - "engines": { - "node": "^10.0.0 || ^12.0.0 || >= 14.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/mysticatea" - }, - "peerDependencies": { - "eslint": ">=5" - } - }, - "node_modules/eslint-utils/node_modules/eslint-visitor-keys": { - "version": "2.1.0", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=10" - } - }, "node_modules/eslint-visitor-keys": { "version": "3.4.3", "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", @@ -6407,6 +3843,28 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/espree": { "version": "9.6.1", "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", @@ -6426,8 +3884,9 @@ }, "node_modules/esprima": { "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "bin": { "esparse": "bin/esparse.js", @@ -6451,8 
+3910,9 @@ }, "node_modules/esrecurse": { "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -6462,8 +3922,9 @@ }, "node_modules/estraverse": { "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true, - "license": "BSD-2-Clause", "engines": { "node": ">=4.0" } @@ -6476,16 +3937,18 @@ }, "node_modules/esutils": { "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true, - "license": "BSD-2-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/execa": { "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "cross-spawn": "^7.0.3", @@ -6507,6 +3970,8 @@ }, "node_modules/exit": { "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", "dev": true, "peer": true, "engines": { @@ -6514,123 +3979,32 @@ } }, "node_modules/expect": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", "dev": true, - "license": "MIT", "dependencies": { - "@jest/expect-utils": "^29.2.2", - 
"jest-get-type": "^29.2.0", - "jest-matcher-utils": "^29.2.2", - "jest-message-util": "^29.2.1", - "jest-util": "^29.2.1" + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/expect/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/expect/node_modules/diff-sequences": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/expect/node_modules/jest-diff": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "chalk": "^4.0.0", - "diff-sequences": "^29.2.0", - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/expect/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/expect/node_modules/jest-matcher-utils": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "dependencies": { - "chalk": "^4.0.0", - "jest-diff": "^29.2.1", - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/expect/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/expect/node_modules/pretty-format": { - "version": "29.2.1", - "dev": 
true, - "license": "MIT", - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/expect/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT" - }, "node_modules/fast-deep-equal": { "version": "3.1.3", - "dev": true, - "license": "MIT" - }, - "node_modules/fast-diff": { - "version": "1.2.0", - "dev": true, - "license": "Apache-2.0" + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true }, "node_modules/fast-glob": { - "version": "3.2.12", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dev": true, - "license": "MIT", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -6644,8 +4018,9 @@ }, "node_modules/fast-glob/node_modules/glob-parent": { "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, - "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, @@ -6655,26 +4030,30 @@ }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true }, "node_modules/fast-levenshtein": { "version": "2.0.6", - "dev": true, - "license": "MIT" + "resolved": 
"https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true }, "node_modules/fastq": { - "version": "1.13.0", + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", "dev": true, - "license": "ISC", "dependencies": { "reusify": "^1.0.4" } }, "node_modules/fb-watchman": { "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { "bser": "2.1.1" @@ -6682,8 +4061,9 @@ }, "node_modules/file-entry-cache": { "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dev": true, - "license": "MIT", "dependencies": { "flat-cache": "^3.0.4" }, @@ -6693,8 +4073,9 @@ }, "node_modules/fill-range": { "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", "dev": true, - "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6704,8 +4085,9 @@ }, "node_modules/find-up": { "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, - "license": "MIT", "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -6718,11 +4100,13 @@ } }, "node_modules/flat-cache": { - 
"version": "3.0.4", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, - "license": "MIT", "dependencies": { - "flatted": "^3.1.0", + "flatted": "^3.2.9", + "keyv": "^4.5.3", "rimraf": "^3.0.2" }, "engines": { @@ -6730,19 +4114,23 @@ } }, "node_modules/flatted": { - "version": "3.2.7", - "dev": true, - "license": "ISC" + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.0.tgz", + "integrity": "sha512-noqGuLw158+DuD9UPRKHpJ2hGxpFyDlYYrfM0mWt4XhT4n0lwzTLh70Tkdyy4kyTmyTT9Bv7bWAJqw7cgkEXDg==", + "dev": true }, "node_modules/fs.realpath": { "version": "1.0.0", - "dev": true, - "license": "ISC" + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true }, "node_modules/fsevents": { "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, - "license": "MIT", + "hasInstallScript": true, "optional": true, "os": [ "darwin" @@ -6752,65 +4140,33 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true, - "license": "MIT" - }, - "node_modules/function.prototype.name": { - "version": "1.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": 
"https://github.com/sponsors/ljharb" - } - }, - "node_modules/functions-have-names": { - "version": "1.2.3", - "dev": true, - "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/gensync": { "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/get-caller-file": { "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", "dev": true, - "license": "ISC", "peer": true, "engines": { "node": "6.* || 8.* || >= 10.*" } }, - "node_modules/get-intrinsic": { - "version": "1.1.3", - "dev": true, - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/get-nonce": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", @@ -6821,8 +4177,9 @@ }, "node_modules/get-package-type": { "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8.0.0" @@ -6830,8 +4187,9 @@ }, "node_modules/get-stream": { "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=10" @@ -6840,25 +4198,11 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, - "node_modules/get-symbol-description": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/glob": { "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "dev": true, - "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -6876,8 +4220,9 @@ }, "node_modules/glob-parent": { "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", "dev": true, - "license": "ISC", "dependencies": { "is-glob": "^4.0.3" }, @@ -6885,6 +4230,28 @@ "node": ">=10.13.0" } }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -6902,8 +4269,9 @@ }, "node_modules/globby": { "version": "11.1.0", + "resolved": 
"https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", "dev": true, - "license": "MIT", "dependencies": { "array-union": "^2.1.0", "dir-glob": "^3.0.1", @@ -6920,9 +4288,10 @@ } }, "node_modules/graceful-fs": { - "version": "4.2.10", - "dev": true, - "license": "ISC" + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true }, "node_modules/graphemer": { "version": "1.4.0", @@ -6930,67 +4299,25 @@ "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", "dev": true }, - "node_modules/has": { - "version": "1.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-bigints": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/has-flag": { "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, - "node_modules/has-property-descriptors": { - "version": "1.0.0", + "node_modules/hasown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz", + "integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==", "dev": true, - "license": "MIT", "dependencies": { - "get-intrinsic": "^1.1.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "dev": true, - "license": "MIT", 
- "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "has-symbols": "^1.0.2" + "function-bind": "^1.1.2" }, "engines": { "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" } }, "node_modules/highlight.js": { @@ -7003,14 +4330,16 @@ }, "node_modules/html-escaper": { "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/human-signals": { "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", "dev": true, - "license": "Apache-2.0", "peer": true, "engines": { "node": ">=10.17.0" @@ -7025,17 +4354,11 @@ "node": ">= 4" } }, - "node_modules/immutable": { - "version": "4.1.0", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/import-fresh": { "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", "dev": true, - "license": "MIT", "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -7049,8 +4372,9 @@ }, "node_modules/import-local": { "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "pkg-dir": "^4.2.0", @@ -7068,16 +4392,18 @@ }, 
"node_modules/imurmurhash": { "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.8.19" } }, "node_modules/inflight": { "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dev": true, - "license": "ISC", "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -7085,21 +4411,9 @@ }, "node_modules/inherits": { "version": "2.0.4", - "dev": true, - "license": "ISC" - }, - "node_modules/internal-slot": { - "version": "1.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "get-intrinsic": "^1.1.0", - "has": "^1.0.3", - "side-channel": "^1.0.4" - }, - "engines": { - "node": ">= 0.4" - } + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true }, "node_modules/invariant": { "version": "2.2.4", @@ -7111,47 +4425,10 @@ }, "node_modules/is-arrayish": { "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", "dev": true, - "license": "MIT" - }, - "node_modules/is-bigint": { - "version": "1.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "has-bigints": "^1.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/is-boolean-object": { - "version": "1.1.2", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } + "peer": true }, "node_modules/is-builtin-module": { "version": "3.2.1", @@ -7168,37 +4445,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-callable": { - "version": "1.2.7", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-core-module": { - "version": "2.10.0", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", "dev": true, - "license": "MIT", "dependencies": { - "has": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-date-object": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" + "hasown": "^2.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -7206,16 +4459,18 @@ }, "node_modules/is-extglob": { "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": 
">=8" @@ -7223,8 +4478,9 @@ }, "node_modules/is-generator-fn": { "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=6" @@ -7232,8 +4488,9 @@ }, "node_modules/is-glob": { "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dev": true, - "license": "MIT", "dependencies": { "is-extglob": "^2.1.1" }, @@ -7243,42 +4500,19 @@ }, "node_modules/is-module": { "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/is-negative-zero": { - "version": "2.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } + "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", + "integrity": "sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==", + "dev": true }, "node_modules/is-number": { "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, - "license": "MIT", "engines": { "node": ">=0.12.0" } }, - "node_modules/is-number-object": { - "version": "1.0.7", - "dev": true, - "license": "MIT", - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-path-inside": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", @@ -7288,36 +4522,11 @@ "node": ">=8" } }, - "node_modules/is-regex": { - 
"version": "1.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-shared-array-buffer": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/is-stream": { "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -7326,102 +4535,69 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-string": { - "version": "1.0.7", - "dev": true, - "license": "MIT", - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-symbol": { - "version": "1.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "has-symbols": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-weakref": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/isexe": { "version": "2.0.0", - "dev": true, - "license": "ISC" + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true }, "node_modules/isomorphic-fetch": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-3.0.0.tgz", + "integrity": 
"sha512-qvUtwJ3j6qwsF3jLxkZ72qCgjMysPzDfeV240JHiGZsANBYd+EEuu35v7dfrJ9Up0Ak07D7GGSkGhCHTqg/5wA==", "dev": true, - "license": "MIT", "dependencies": { "node-fetch": "^2.6.1", "whatwg-fetch": "^3.4.1" } }, "node_modules/istanbul-lib-coverage": { - "version": "3.2.0", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "engines": { "node": ">=8" } }, "node_modules/istanbul-lib-instrument": { - "version": "5.2.1", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.2.tgz", + "integrity": "sha512-1WUsZ9R1lA0HtBSohTkm39WTPlNKSJ5iFk7UwqXkBLoHQT+hfqPsfsTDVuZdKGaBwn7din9bS7SsnoAr943hvw==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { - "@babel/core": "^7.12.3", - "@babel/parser": "^7.14.7", - "@istanbuljs/schema": "^0.1.2", + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", "istanbul-lib-coverage": "^3.2.0", - "semver": "^6.3.0" + "semver": "^7.5.4" }, "engines": { - "node": ">=8" + "node": ">=10" } }, "node_modules/istanbul-lib-report": { - "version": "3.0.0", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^3.0.0", + "make-dir": "^4.0.0", "supports-color": "^7.1.0" }, "engines": { - "node": ">=8" + "node": ">=10" } }, "node_modules/istanbul-lib-source-maps": { "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": 
"sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { "debug": "^4.1.1", @@ -7433,9 +4609,10 @@ } }, "node_modules/istanbul-reports": { - "version": "3.1.5", + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { "html-escaper": "^2.0.0", @@ -7446,15 +4623,16 @@ } }, "node_modules/jest": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/core": "^29.2.2", - "@jest/types": "^29.2.1", + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", "import-local": "^3.0.2", - "jest-cli": "^29.2.2" + "jest-cli": "^29.7.0" }, "bin": { "jest": "bin/jest.js" @@ -7471,22 +4649,15 @@ } } }, - "node_modules/jest-canvas-mock": { - "version": "2.5.2", - "dev": true, - "license": "MIT", - "dependencies": { - "cssfontparser": "^1.2.1", - "moo-color": "^1.0.2" - } - }, "node_modules/jest-changed-files": { - "version": "29.2.0", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "execa": "^5.0.0", + "jest-util": "^29.7.0", "p-limit": "^3.1.0" }, "engines": { @@ -7494,28 +4665,30 @@ } }, "node_modules/jest-circus": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + 
"integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/environment": "^29.2.2", - "@jest/expect": "^29.2.2", - "@jest/test-result": "^29.2.1", - "@jest/types": "^29.2.1", + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "co": "^4.6.0", - "dedent": "^0.7.0", + "dedent": "^1.0.0", "is-generator-fn": "^2.0.0", - "jest-each": "^29.2.1", - "jest-matcher-utils": "^29.2.2", - "jest-message-util": "^29.2.1", - "jest-runtime": "^29.2.2", - "jest-snapshot": "^29.2.2", - "jest-util": "^29.2.1", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", "p-limit": "^3.1.0", - "pretty-format": "^29.2.1", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", "slash": "^3.0.0", "stack-utils": "^2.0.3" }, @@ -7523,120 +4696,23 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-circus/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-circus/node_modules/diff-sequences": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-circus/node_modules/jest-diff": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "diff-sequences": "^29.2.0", - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-circus/node_modules/jest-get-type": { - "version": 
"29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-circus/node_modules/jest-matcher-utils": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "jest-diff": "^29.2.1", - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-circus/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-circus/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-circus/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, "node_modules/jest-cli": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/core": "^29.2.2", - "@jest/test-result": "^29.2.1", - "@jest/types": "^29.2.1", + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", "chalk": "^4.0.0", + "create-jest": "^29.7.0", "exit": "^0.1.2", - "graceful-fs": "^4.2.9", "import-local": "^3.0.2", - "jest-config": "^29.2.2", - "jest-util": "^29.2.1", - "jest-validate": "^29.2.2", - "prompts": 
"^2.0.1", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", "yargs": "^17.3.1" }, "bin": { @@ -7654,119 +4730,33 @@ } } }, - "node_modules/jest-cli/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-cli/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/jest-cli/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-cli/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-cli/node_modules/jest-validate": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "camelcase": "^6.2.0", - "chalk": "^4.0.0", - "jest-get-type": "^29.2.0", - "leven": "^3.1.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-cli/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-cli/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - 
"peer": true - }, "node_modules/jest-config": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/core": "^7.11.6", - "@jest/test-sequencer": "^29.2.2", - "@jest/types": "^29.2.1", - "babel-jest": "^29.2.2", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", "chalk": "^4.0.0", "ci-info": "^3.2.0", "deepmerge": "^4.2.2", "glob": "^7.1.3", "graceful-fs": "^4.2.9", - "jest-circus": "^29.2.2", - "jest-environment-node": "^29.2.2", - "jest-get-type": "^29.2.0", - "jest-regex-util": "^29.2.0", - "jest-resolve": "^29.2.2", - "jest-runner": "^29.2.2", - "jest-util": "^29.2.1", - "jest-validate": "^29.2.2", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", "micromatch": "^4.0.4", "parse-json": "^5.2.0", - "pretty-format": "^29.2.1", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "strip-json-comments": "^3.1.1" }, @@ -7786,54 +4776,94 @@ } } }, - "node_modules/jest-config/node_modules/ansi-styles": { - "version": "5.2.0", + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - 
"node_modules/jest-config/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/jest-config/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-config/node_modules/jest-haste-map": { - "version": "29.2.1", + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/types": "^29.2.1", + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "peer": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "peer": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + 
"node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "peer": true, + "dependencies": { + "@jest/types": "^29.6.3", "@types/graceful-fs": "^4.1.3", "@types/node": "*", "anymatch": "^3.0.3", "fb-watchman": "^2.0.0", "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", "micromatch": "^4.0.4", "walker": "^1.0.8" }, @@ -7844,378 +4874,48 @@ "fsevents": "^2.3.2" } }, - "node_modules/jest-config/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-config/node_modules/jest-resolve": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", - "jest-pnp-resolver": "^1.2.2", - "jest-util": "^29.2.1", - "jest-validate": "^29.2.2", - "resolve": "^1.20.0", - "resolve.exports": "^1.1.0", - "slash": "^3.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-config/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - 
"chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-config/node_modules/jest-validate": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "camelcase": "^6.2.0", - "chalk": "^4.0.0", - "jest-get-type": "^29.2.0", - "leven": "^3.1.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-config/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-config/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-config/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/jest-config/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/jest-docblock": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "detect-newline": "^3.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-each": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": 
"^29.2.1", - "chalk": "^4.0.0", - "jest-get-type": "^29.2.0", - "jest-util": "^29.2.1", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-each/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-each/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-each/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-each/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-each/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/jest-environment-node": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/environment": "^29.2.2", - "@jest/fake-timers": "^29.2.2", - "@jest/types": "^29.2.1", - "@types/node": "*", - "jest-mock": "^29.2.2", - "jest-util": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-environment-node/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": 
"^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-fetch-mock": { - "version": "3.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "cross-fetch": "^3.0.4", - "promise-polyfill": "^8.1.3" - } - }, - "node_modules/jest-get-type": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-haste-map": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@jest/types": "^27.5.1", - "@types/graceful-fs": "^4.1.2", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^27.5.1", - "jest-serializer": "^27.5.1", - "jest-util": "^27.5.1", - "jest-worker": "^27.5.1", - "micromatch": "^4.0.4", - "walker": "^1.0.7" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "node_modules/jest-haste-map/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-haste-map/node_modules/@types/yargs": { - "version": "16.0.4", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/yargs-parser": "*" - } - }, "node_modules/jest-leak-detector": { - "version": "29.2.1", + "version": "29.7.0", + "resolved": 
"https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-leak-detector/node_modules/ansi-styles": { - "version": "5.2.0", + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-leak-detector/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-leak-detector/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-leak-detector/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, "node_modules/jest-message-util": { - "version": "29.2.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": 
"sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", "dev": true, - "license": "MIT", "dependencies": { "@babel/code-frame": "^7.12.13", - "@jest/types": "^29.2.1", + "@jest/types": "^29.6.3", "@types/stack-utils": "^2.0.0", "chalk": "^4.0.0", "graceful-fs": "^4.2.9", "micromatch": "^4.0.4", - "pretty-format": "^29.2.1", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "stack-utils": "^2.0.3" }, @@ -8223,70 +4923,26 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-message-util/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-message-util/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-message-util/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT" - }, "node_modules/jest-mock": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/types": "^29.2.1", + "@jest/types": "^29.6.3", "@types/node": "*", - "jest-util": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-mock/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" + "jest-util": "^29.7.0" 
}, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-pnp-resolver": { - "version": "1.2.2", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=6" @@ -8301,111 +4957,76 @@ } }, "node_modules/jest-regex-util": { - "version": "27.5.1", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", "dev": true, - "license": "MIT", - "optional": true, "peer": true, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-resolve": { - "version": "27.5.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", "dev": true, - "license": "MIT", - "optional": true, "peer": true, "dependencies": { - "@jest/types": "^27.5.1", "chalk": "^4.0.0", "graceful-fs": "^4.2.9", - "jest-haste-map": "^27.5.1", + "jest-haste-map": "^29.7.0", "jest-pnp-resolver": "^1.2.2", - "jest-util": "^27.5.1", - "jest-validate": "^27.5.1", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", "resolve": "^1.20.0", - "resolve.exports": "^1.1.0", + "resolve.exports": "^2.0.0", "slash": "^3.0.0" }, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-resolve-dependencies": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": 
"sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "jest-regex-util": "^29.2.0", - "jest-snapshot": "^29.2.2" + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-resolve-dependencies/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-resolve/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-resolve/node_modules/@types/yargs": { - "version": "16.0.4", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/yargs-parser": "*" - } - }, "node_modules/jest-runner": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/console": "^29.2.1", - "@jest/environment": "^29.2.2", - "@jest/test-result": "^29.2.1", - "@jest/transform": "^29.2.2", - "@jest/types": "^29.2.1", + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "emittery": "^0.13.1", "graceful-fs": "^4.2.9", - "jest-docblock": "^29.2.0", - "jest-environment-node": "^29.2.2", - 
"jest-haste-map": "^29.2.1", - "jest-leak-detector": "^29.2.1", - "jest-message-util": "^29.2.1", - "jest-resolve": "^29.2.2", - "jest-runtime": "^29.2.2", - "jest-util": "^29.2.1", - "jest-watcher": "^29.2.2", - "jest-worker": "^29.2.1", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", "p-limit": "^3.1.0", "source-map-support": "0.5.13" }, @@ -8413,213 +5034,33 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-runner/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-runner/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/jest-runner/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/jest-haste-map": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - 
"node_modules/jest-runner/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/jest-resolve": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", - "jest-pnp-resolver": "^1.2.2", - "jest-util": "^29.2.1", - "jest-validate": "^29.2.2", - "resolve": "^1.20.0", - "resolve.exports": "^1.1.0", - "slash": "^3.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/jest-validate": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "camelcase": "^6.2.0", - "chalk": "^4.0.0", - "jest-get-type": "^29.2.0", - "leven": "^3.1.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - 
"engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runner/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/jest-runner/node_modules/source-map-support": { - "version": "0.5.13", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/jest-runner/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/jest-runtime": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/environment": "^29.2.2", - "@jest/fake-timers": "^29.2.2", - "@jest/globals": "^29.2.2", - "@jest/source-map": "^29.2.0", - "@jest/test-result": "^29.2.1", - "@jest/transform": "^29.2.2", - "@jest/types": "^29.2.1", + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "cjs-module-lexer": "^1.0.0", "collect-v8-coverage": "^1.0.0", "glob": "^7.1.3", "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", - "jest-message-util": "^29.2.1", - "jest-mock": "^29.2.2", - "jest-regex-util": "^29.2.0", - "jest-resolve": "^29.2.2", - "jest-snapshot": "^29.2.2", - "jest-util": "^29.2.1", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + 
"jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", "slash": "^3.0.0", "strip-bom": "^4.0.0" }, @@ -8627,410 +5068,45 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-runtime/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/jest-runtime/node_modules/camelcase": { - "version": "6.3.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/jest-runtime/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/jest-haste-map": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "node_modules/jest-runtime/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/jest-resolve": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "jest-haste-map": "^29.2.1", - "jest-pnp-resolver": "^1.2.2", - "jest-util": "^29.2.1", - "jest-validate": 
"^29.2.2", - "resolve": "^1.20.0", - "resolve.exports": "^1.1.0", - "slash": "^3.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/jest-validate": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "camelcase": "^6.2.0", - "chalk": "^4.0.0", - "jest-get-type": "^29.2.0", - "leven": "^3.1.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-runtime/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/jest-runtime/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - 
"node_modules/jest-serializer": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/node": "*", - "graceful-fs": "^4.2.9" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, "node_modules/jest-snapshot": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@babel/core": "^7.11.6", "@babel/generator": "^7.7.2", "@babel/plugin-syntax-jsx": "^7.7.2", "@babel/plugin-syntax-typescript": "^7.7.2", - "@babel/traverse": "^7.7.2", "@babel/types": "^7.3.3", - "@jest/expect-utils": "^29.2.2", - "@jest/transform": "^29.2.2", - "@jest/types": "^29.2.1", - "@types/babel__traverse": "^7.0.6", - "@types/prettier": "^2.1.5", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", "babel-preset-current-node-syntax": "^1.0.0", "chalk": "^4.0.0", - "expect": "^29.2.2", + "expect": "^29.7.0", "graceful-fs": "^4.2.9", - "jest-diff": "^29.2.1", - "jest-get-type": "^29.2.0", - "jest-haste-map": "^29.2.1", - "jest-matcher-utils": "^29.2.2", - "jest-message-util": "^29.2.1", - "jest-util": "^29.2.1", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", "natural-compare": "^1.4.0", - "pretty-format": "^29.2.1", - "semver": "^7.3.5" + "pretty-format": "^29.7.0", + "semver": "^7.5.3" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-snapshot/node_modules/ansi-styles": { - "version": "5.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - 
"node_modules/jest-snapshot/node_modules/diff-sequences": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/jest-diff": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "diff-sequences": "^29.2.0", - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/jest-get-type": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/jest-haste-map": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/graceful-fs": "^4.1.3", - "@types/node": "*", - "anymatch": "^3.0.3", - "fb-watchman": "^2.0.0", - "graceful-fs": "^4.2.9", - "jest-regex-util": "^29.2.0", - "jest-util": "^29.2.1", - "jest-worker": "^29.2.1", - "micromatch": "^4.0.4", - "walker": "^1.0.8" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - }, - "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "node_modules/jest-snapshot/node_modules/jest-matcher-utils": { - "version": "29.2.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "chalk": "^4.0.0", - "jest-diff": "^29.2.1", - "jest-get-type": "^29.2.0", - "pretty-format": "^29.2.1" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/jest-regex-util": { - "version": "29.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": 
true, - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/jest-worker": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@types/node": "*", - "jest-util": "^29.2.1", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/pretty-format": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@jest/schemas": "^29.0.0", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/jest-snapshot/node_modules/react-is": { - "version": "18.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/jest-snapshot/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/jest-snapshot/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/jest-util": { - "version": "27.5.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", "dev": true, - "license": "MIT", - "optional": true, - "peer": true, "dependencies": { - "@jest/types": "^27.5.1", + "@jest/types": "^29.6.3", 
"@types/node": "*", "chalk": "^4.0.0", "ci-info": "^3.2.0", @@ -9038,86 +5114,32 @@ "picomatch": "^2.2.3" }, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-util/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-util/node_modules/@types/yargs": { - "version": "16.0.4", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/yargs-parser": "*" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-validate": { - "version": "27.5.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", "dev": true, - "license": "MIT", - "optional": true, "peer": true, "dependencies": { - "@jest/types": "^27.5.1", + "@jest/types": "^29.6.3", "camelcase": "^6.2.0", "chalk": "^4.0.0", - "jest-get-type": "^27.5.1", + "jest-get-type": "^29.6.3", "leven": "^3.1.0", - "pretty-format": "^27.5.1" + "pretty-format": "^29.7.0" }, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-validate/node_modules/@jest/types": { - "version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^16.0.0", - "chalk": "^4.0.0" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "node_modules/jest-validate/node_modules/@types/yargs": { - 
"version": "16.0.4", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/yargs-parser": "*" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-validate/node_modules/camelcase": { "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", "dev": true, - "license": "MIT", - "optional": true, "peer": true, "engines": { "node": ">=10" @@ -9127,61 +5149,46 @@ } }, "node_modules/jest-watcher": { - "version": "29.2.2", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/test-result": "^29.2.1", - "@jest/types": "^29.2.1", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", "@types/node": "*", "ansi-escapes": "^4.2.1", "chalk": "^4.0.0", "emittery": "^0.13.1", - "jest-util": "^29.2.1", + "jest-util": "^29.7.0", "string-length": "^4.0.1" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-watcher/node_modules/jest-util": { - "version": "29.2.1", + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "@jest/types": "^29.2.1", "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/jest-worker": { - 
"version": "27.5.1", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "@types/node": "*", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" - }, - "engines": { - "node": ">= 10.13.0" - } - }, "node_modules/jest-worker/node_modules/supports-color": { "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, - "license": "MIT", - "optional": true, "peer": true, "dependencies": { "has-flag": "^4.0.0" @@ -9195,7 +5202,8 @@ }, "node_modules/js-tokens": { "version": "4.0.0", - "license": "MIT" + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/js-yaml": { "version": "4.1.0", @@ -9211,8 +5219,9 @@ }, "node_modules/jsesc": { "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", "dev": true, - "license": "MIT", "bin": { "jsesc": "bin/jsesc" }, @@ -9220,30 +5229,42 @@ "node": ">=4" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", "dev": true, - "license": "MIT" + "peer": true }, "node_modules/json-schema-traverse": { "version": "0.4.1", - "dev": 
true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true }, "node_modules/json-stringify-safe": { "version": "5.0.1", - "dev": true, - "license": "ISC" + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "dev": true }, "node_modules/json5": { "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, - "license": "MIT", "bin": { "json5": "lib/cli.js" }, @@ -9251,44 +5272,30 @@ "node": ">=6" } }, - "node_modules/jsx-ast-utils": { - "version": "3.3.3", + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, - "license": "MIT", "dependencies": { - "array-includes": "^3.1.5", - "object.assign": "^4.1.3" - }, - "engines": { - "node": ">=4.0" + "json-buffer": "3.0.1" } }, "node_modules/kleur": { "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", "dev": true, - "license": 
"MIT", "peer": true, "engines": { "node": ">=6" } }, - "node_modules/language-subtag-registry": { - "version": "0.3.22", - "dev": true, - "license": "CC0-1.0" - }, - "node_modules/language-tags": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - "dependencies": { - "language-subtag-registry": "~0.3.2" - } - }, "node_modules/leven": { "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=6" @@ -9309,13 +5316,16 @@ }, "node_modules/lines-and-columns": { "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", "dev": true, - "license": "MIT" + "peer": true }, "node_modules/locate-path": { "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, - "license": "MIT", "dependencies": { "p-locate": "^5.0.0" }, @@ -9326,29 +5336,22 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/lodash": { - "version": "4.17.21", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "dev": true, - "license": "MIT" - }, "node_modules/lodash.memoize": { "version": "4.1.2", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true }, "node_modules/lodash.merge": { "version": "4.6.2", - "dev": true, - "license": "MIT" + "resolved": 
"https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true }, "node_modules/loose-envify": { "version": "1.4.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, @@ -9358,8 +5361,9 @@ }, "node_modules/lru-cache": { "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", "dev": true, - "license": "ISC", "dependencies": { "yallist": "^4.0.0" }, @@ -9368,15 +5372,16 @@ } }, "node_modules/make-dir": { - "version": "3.1.0", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { - "semver": "^6.0.0" + "semver": "^7.5.3" }, "engines": { - "node": ">=8" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -9384,13 +5389,15 @@ }, "node_modules/make-error": { "version": "1.3.6", - "dev": true, - "license": "ISC" + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true }, "node_modules/makeerror": { "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "dependencies": { 
"tmpl": "1.0.5" @@ -9398,22 +5405,25 @@ }, "node_modules/merge-stream": { "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/merge2": { "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "dev": true, - "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/micromatch": { "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", "dev": true, - "license": "MIT", "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" @@ -9424,41 +5434,34 @@ }, "node_modules/mimic-fn": { "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=6" } }, "node_modules/minimatch": { - "version": "3.1.2", + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", "dev": true, - "license": "ISC", "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.6", - "dev": true, - "license": "MIT" - }, - "node_modules/moo-color": { - "version": "1.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "color-name": "^1.1.4" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + 
"url": "https://github.com/sponsors/isaacs" } }, "node_modules/ms": { "version": "2.1.2", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true }, "node_modules/nanoid": { "version": "3.3.7", @@ -9480,13 +5483,15 @@ }, "node_modules/natural-compare": { "version": "1.4.0", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true }, "node_modules/nock": { - "version": "13.4.0", + "version": "13.5.3", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.5.3.tgz", + "integrity": "sha512-2NlGmHIK2rTeyy7UaY1ZNg0YZfEJMxghXgZi0b4DBsUyoDNTTxZeCSG1nmirAWF44RkkoV8NnegLVQijgVapNQ==", "dev": true, - "license": "MIT", "dependencies": { "debug": "^4.1.0", "json-stringify-safe": "^5.0.1", @@ -9497,9 +5502,10 @@ } }, "node_modules/node-fetch": { - "version": "2.6.7", + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", "dev": true, - "license": "MIT", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -9515,29 +5521,11 @@ } } }, - "node_modules/node-fetch/node_modules/tr46": { - "version": "0.0.3", - "dev": true, - "license": "MIT" - }, - "node_modules/node-fetch/node_modules/webidl-conversions": { - "version": "3.0.1", - "dev": true, - "license": "BSD-2-Clause" - }, - "node_modules/node-fetch/node_modules/whatwg-url": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, "node_modules/node-int64": { "version": "0.4.0", + "resolved": 
"https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/node-releases": { @@ -9548,8 +5536,9 @@ }, "node_modules/normalize-path": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=0.10.0" @@ -9557,8 +5546,9 @@ }, "node_modules/npm-run-path": { "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "path-key": "^3.0.0" @@ -9569,113 +5559,26 @@ }, "node_modules/object-assign": { "version": "4.1.1", - "license": "MIT", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", "engines": { "node": ">=0.10.0" } }, - "node_modules/object-inspect": { - "version": "1.12.2", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.assign": { - "version": "4.1.4", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object.entries": { - "version": "1.1.5", - 
"dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.fromentries": { - "version": "2.0.5", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object.hasown": { - "version": "1.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "define-properties": "^1.1.4", - "es-abstract": "^1.19.5" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object.values": { - "version": "1.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/once": { "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, - "license": "ISC", "dependencies": { "wrappy": "1" } }, "node_modules/onetime": { "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "mimic-fn": "^2.1.0" @@ -9706,8 +5609,9 @@ }, "node_modules/p-limit": { "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, - "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" 
}, @@ -9720,8 +5624,9 @@ }, "node_modules/p-locate": { "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, - "license": "MIT", "dependencies": { "p-limit": "^3.0.2" }, @@ -9734,8 +5639,9 @@ }, "node_modules/p-try": { "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=6" @@ -9743,8 +5649,9 @@ }, "node_modules/parent-module": { "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, - "license": "MIT", "dependencies": { "callsites": "^3.0.0" }, @@ -9754,8 +5661,10 @@ }, "node_modules/parse-json": { "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", "dev": true, - "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", @@ -9771,50 +5680,57 @@ }, "node_modules/path-exists": { "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/path-is-absolute": { "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", 
"dev": true, - "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/path-key": { "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/path-parse": { "version": "1.0.7", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true }, "node_modules/path-type": { "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/picocolors": { "version": "1.0.0", - "dev": true, - "license": "ISC" + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true }, "node_modules/picomatch": { "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, - "license": "MIT", "engines": { "node": ">=8.6" }, @@ -9823,9 +5739,10 @@ } }, "node_modules/pirates": { - "version": "4.0.5", + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">= 6" @@ -9833,8 +5750,9 @@ }, "node_modules/pkg-dir": { "version": "4.2.0", + 
"resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "find-up": "^4.0.0" @@ -9845,8 +5763,9 @@ }, "node_modules/pkg-dir/node_modules/find-up": { "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "locate-path": "^5.0.0", @@ -9858,8 +5777,9 @@ }, "node_modules/pkg-dir/node_modules/locate-path": { "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "p-locate": "^4.1.0" @@ -9870,8 +5790,9 @@ }, "node_modules/pkg-dir/node_modules/p-limit": { "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "p-try": "^2.0.0" @@ -9885,8 +5806,9 @@ }, "node_modules/pkg-dir/node_modules/p-locate": { "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "p-limit": "^2.2.0" @@ -9924,9 +5846,10 @@ } }, "node_modules/postcss-js": { - "version": "4.0.0", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": 
"sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", "dev": true, - "license": "MIT", "dependencies": { "camelcase-css": "^2.0.1" }, @@ -9938,7 +5861,7 @@ "url": "https://opencollective.com/postcss/" }, "peerDependencies": { - "postcss": "^8.3.3" + "postcss": "^8.4.21" } }, "node_modules/postcss-mixins": { @@ -9963,20 +5886,7 @@ "postcss": "^8.2.14" } }, - "node_modules/postcss-preset-mantine": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/postcss-preset-mantine/-/postcss-preset-mantine-1.13.0.tgz", - "integrity": "sha512-1bv/mQz2K+/FixIMxYd83BYH7PusDZaI7LpUtKbb1l/5N5w6t1p/V9ONHfRJeeAZyfa6Xc+AtR+95VKdFXRH1g==", - "dev": true, - "dependencies": { - "postcss-mixins": "^9.0.4", - "postcss-nested": "^6.0.1" - }, - "peerDependencies": { - "postcss": ">=8.0.0" - } - }, - "node_modules/postcss-preset-mantine/node_modules/postcss-nested": { + "node_modules/postcss-nested": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", @@ -9995,6 +5905,19 @@ "postcss": "^8.2.14" } }, + "node_modules/postcss-preset-mantine": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/postcss-preset-mantine/-/postcss-preset-mantine-1.13.0.tgz", + "integrity": "sha512-1bv/mQz2K+/FixIMxYd83BYH7PusDZaI7LpUtKbb1l/5N5w6t1p/V9ONHfRJeeAZyfa6Xc+AtR+95VKdFXRH1g==", + "dev": true, + "dependencies": { + "postcss-mixins": "^9.0.4", + "postcss-nested": "^6.0.1" + }, + "peerDependencies": { + "postcss": ">=8.0.0" + } + }, "node_modules/postcss-selector-parser": { "version": "6.0.15", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.15.tgz", @@ -10033,52 +5956,25 @@ "node": ">= 0.8.0" } }, - "node_modules/prettier": { - "version": "2.8.8", - "dev": true, - "license": "MIT", - "bin": { - "prettier": "bin-prettier.js" - }, 
- "engines": { - "node": ">=10.13.0" - }, - "funding": { - "url": "https://github.com/prettier/prettier?sponsor=1" - } - }, - "node_modules/prettier-linter-helpers": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-diff": "^1.1.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/pretty-format": { - "version": "27.5.1", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dev": true, - "license": "MIT", - "optional": true, - "peer": true, "dependencies": { - "ansi-regex": "^5.0.1", + "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", - "react-is": "^17.0.1" + "react-is": "^18.0.0" }, "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/pretty-format/node_modules/ansi-styles": { "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, - "license": "MIT", - "optional": true, - "peer": true, "engines": { "node": ">=10" }, @@ -10086,15 +5982,17 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/promise-polyfill": { - "version": "8.2.3", - "dev": true, - "license": "MIT" + "node_modules/pretty-format/node_modules/react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", + "dev": true }, "node_modules/prompts": { "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": 
"sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "kleur": "^3.0.3", @@ -10106,35 +6004,53 @@ }, "node_modules/prop-types": { "version": "15.8.1", - "license": "MIT", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", "react-is": "^16.13.1" } }, - "node_modules/prop-types/node_modules/react-is": { - "version": "16.13.1", - "license": "MIT" - }, "node_modules/propagate": { "version": "2.0.1", + "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", + "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==", "dev": true, - "license": "MIT", "engines": { "node": ">= 8" } }, "node_modules/punycode": { - "version": "2.1.1", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6" } }, + "node_modules/pure-rand": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz", + "integrity": "sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "peer": true + }, "node_modules/queue-microtask": { "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": 
"sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "dev": true, "funding": [ { @@ -10149,40 +6065,35 @@ "type": "consulting", "url": "https://feross.org/support" } - ], - "license": "MIT" + ] }, "node_modules/react": { - "version": "17.0.2", - "license": "MIT", - "peer": true, + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", "dependencies": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" + "loose-envify": "^1.1.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { - "version": "17.0.2", - "license": "MIT", - "peer": true, + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", "dependencies": { "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "scheduler": "^0.20.2" + "scheduler": "^0.23.0" }, "peerDependencies": { - "react": "17.0.2" + "react": "^18.2.0" } }, "node_modules/react-is": { - "version": "17.0.2", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/react-number-format": { "version": "5.3.1", @@ -10196,6 +6107,15 @@ "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0" } }, + "node_modules/react-refresh": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", + "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, 
"node_modules/react-remove-scroll": { "version": "2.5.7", "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.7.tgz", @@ -10241,6 +6161,36 @@ } } }, + "node_modules/react-router": { + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.22.1.tgz", + "integrity": "sha512-0pdoRGwLtemnJqn1K0XHUbnKiX0S4X8CgvVVmHGOWmofESj31msHo/1YiqcJWK7Wxfq2a4uvvtS01KAQyWK/CQ==", + "dependencies": { + "@remix-run/router": "1.15.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.22.1.tgz", + "integrity": "sha512-iwMyyyrbL7zkKY7MRjOVRy+TMnS/OPusaFVxM2P11x9dzSzGmLsebkCvYirGq0DWB9K9hOspHYYtDz33gE5Duw==", + "dependencies": { + "@remix-run/router": "1.15.1", + "react-router": "6.22.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, "node_modules/react-style-singleton": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", @@ -10279,129 +6229,28 @@ "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, - "node_modules/readdirp": { - "version": "3.6.0", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "dev": true, - "license": "MIT" - }, - "node_modules/regenerate-unicode-properties": { - "version": "10.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "regenerate": "^1.4.2" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/regenerator-runtime": { - "version": "0.13.9", - "dev": true, - "license": "MIT" - }, - "node_modules/regenerator-transform": { - "version": "0.15.0", - "dev": true, - "license": "MIT", - 
"dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regexp.prototype.flags": { - "version": "1.4.3", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/regexpp": { - "version": "3.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/mysticatea" - } - }, - "node_modules/regexpu-core": { - "version": "5.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsgen": "^0.7.1", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regjsgen": { - "version": "0.7.1", - "dev": true, - "license": "MIT" - }, - "node_modules/regjsparser": { - "version": "0.9.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "dev": true, - "bin": { - "jsesc": "bin/jsesc" - } + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "node_modules/require-directory": { "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=0.10.0" } }, "node_modules/resolve": { - "version": "1.22.1", 
+ "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "dev": true, - "license": "MIT", "dependencies": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -10414,8 +6263,9 @@ }, "node_modules/resolve-cwd": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "resolve-from": "^5.0.0" @@ -10426,8 +6276,9 @@ }, "node_modules/resolve-cwd/node_modules/resolve-from": { "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -10435,16 +6286,18 @@ }, "node_modules/resolve-from": { "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/resolve.exports": { - "version": "1.1.0", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", + "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=10" @@ -10452,8 +6305,9 @@ }, "node_modules/reusify": { "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": 
"sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", "dev": true, - "license": "MIT", "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" @@ -10461,8 +6315,9 @@ }, "node_modules/rimraf": { "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "dev": true, - "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -10507,6 +6362,8 @@ }, "node_modules/run-parallel": { "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", "dev": true, "funding": [ { @@ -10522,69 +6379,38 @@ "url": "https://feross.org/support" } ], - "license": "MIT", "dependencies": { "queue-microtask": "^1.2.2" } }, - "node_modules/safe-buffer": { - "version": "5.1.2", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/safe-regex-test": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "is-regex": "^1.1.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/sass": { - "version": "1.69.5", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true, - "dependencies": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", - "source-map-js": ">=0.6.2 <2.0.0" - }, - "bin": { - "sass": "sass.js" - }, - "engines": { - "node": ">=14.0.0" - } - }, "node_modules/scheduler": { - "version": "0.20.2", - "license": "MIT", - "peer": true, + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", "dependencies": { - "loose-envify": "^1.1.0", - 
"object-assign": "^4.1.1" + "loose-envify": "^1.1.0" } }, "node_modules/semver": { - "version": "6.3.1", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, - "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, "bin": { "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/shebang-command": { "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", "dev": true, - "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" }, @@ -10594,49 +6420,41 @@ }, "node_modules/shebang-regex": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, - "node_modules/side-channel": { - "version": "1.0.4", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/signal-exit": { "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true, - "license": "ISC", "peer": true }, "node_modules/sisteransi": { "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", "dev": true, - "license": "MIT", "peer": true }, 
"node_modules/slash": { "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/source-map": { "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, - "license": "BSD-3-Clause", "peer": true, "engines": { "node": ">=0.10.0" @@ -10644,17 +6462,18 @@ }, "node_modules/source-map-js": { "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", "dev": true, - "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/source-map-support": { - "version": "0.5.21", + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", "dev": true, - "license": "MIT", - "optional": true, "peer": true, "dependencies": { "buffer-from": "^1.0.0", @@ -10663,14 +6482,16 @@ }, "node_modules/sprintf-js": { "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", "dev": true, - "license": "BSD-3-Clause", "peer": true }, "node_modules/stack-utils": { - "version": "2.0.5", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", "dev": true, - "license": 
"MIT", "dependencies": { "escape-string-regexp": "^2.0.0" }, @@ -10680,16 +6501,18 @@ }, "node_modules/stack-utils/node_modules/escape-string-regexp": { "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/string-length": { "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "char-regex": "^1.0.2", @@ -10699,15 +6522,11 @@ "node": ">=10" } }, - "node_modules/string-natural-compare": { - "version": "3.0.1", - "dev": true, - "license": "MIT" - }, "node_modules/string-width": { "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "emoji-regex": "^8.0.0", @@ -10718,60 +6537,11 @@ "node": ">=8" } }, - "node_modules/string-width/node_modules/emoji-regex": { - "version": "8.0.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/string.prototype.matchall": { - "version": "4.0.7", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.1", - "get-intrinsic": "^1.1.1", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.1", - "side-channel": "^1.0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/string.prototype.trimend": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - 
"dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.19.5" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/string.prototype.trimstart": { - "version": "1.0.5", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.19.5" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/strip-ansi": { "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, - "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, @@ -10781,8 +6551,9 @@ }, "node_modules/strip-bom": { "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -10790,8 +6561,9 @@ }, "node_modules/strip-final-newline": { "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=6" @@ -10799,8 +6571,9 @@ }, "node_modules/strip-json-comments": { "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, - "license": "MIT", "engines": { "node": ">=8" }, @@ -10810,7 +6583,8 @@ }, "node_modules/style-mod": { "version": "4.1.0", - "license": "MIT" + "resolved": 
"https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==" }, "node_modules/sugarss": { "version": "4.0.1", @@ -10830,8 +6604,9 @@ }, "node_modules/supports-color": { "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, - "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, @@ -10841,8 +6616,9 @@ }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, - "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -10855,36 +6631,11 @@ "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" }, - "node_modules/terser": { - "version": "5.20.0", - "dev": true, - "license": "BSD-2-Clause", - "optional": true, - "peer": true, - "dependencies": { - "@jridgewell/source-map": "^0.3.3", - "acorn": "^8.8.2", - "commander": "^2.20.0", - "source-map-support": "~0.5.20" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "dev": true, - "license": "MIT", - "optional": true, - "peer": true - }, "node_modules/test-exclude": { "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", "dev": true, - "license": "ISC", "peer": true, "dependencies": 
{ "@istanbuljs/schema": "^0.1.2", @@ -10895,29 +6646,57 @@ "node": ">=8" } }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "peer": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "peer": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/text-table": { "version": "0.2.0", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true }, "node_modules/tmpl": { "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", "dev": true, - "license": "BSD-3-Clause", "peer": true }, "node_modules/to-fast-properties": { "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/to-regex-range": { "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": 
"sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, - "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -10925,6 +6704,12 @@ "node": ">=8.0" } }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "dev": true + }, "node_modules/ts-api-utils": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.2.1.tgz", @@ -10938,9 +6723,10 @@ } }, "node_modules/ts-jest": { - "version": "29.1.1", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, - "license": "MIT", "dependencies": { "bs-logger": "0.x", "fast-json-stable-stringify": "2.x", @@ -10955,7 +6741,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -10979,88 +6765,10 @@ } } }, - "node_modules/ts-jest/node_modules/jest-util": { - "version": "29.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jest/types": "^29.2.1", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } - }, - "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/tsconfig-paths": { - "version": "3.14.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/json5": "^0.0.29", - "json5": "^1.0.1", - "minimist": 
"^1.2.6", - "strip-bom": "^3.0.0" - } - }, - "node_modules/tsconfig-paths/node_modules/json5": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "minimist": "^1.2.0" - }, - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/tsconfig-paths/node_modules/strip-bom": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/tslib": { - "version": "2.4.0", - "license": "0BSD" - }, - "node_modules/tsutils": { - "version": "3.21.0", - "dev": true, - "license": "MIT", - "dependencies": { - "tslib": "^1.8.1" - }, - "engines": { - "node": ">= 6" - }, - "peerDependencies": { - "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" - } - }, - "node_modules/tsutils/node_modules/tslib": { - "version": "1.14.1", - "dev": true, - "license": "0BSD" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, "node_modules/type-check": { "version": "0.4.0", @@ -11076,8 +6784,9 @@ }, "node_modules/type-detect": { "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=4" @@ -11085,8 +6794,9 @@ }, "node_modules/type-fest": { "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, - "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -11107,60 +6817,11 @@ "node": ">=14.17" } }, - "node_modules/unbox-primitive": { - "version": "1.0.2", - "dev": true, - 
"license": "MIT", - "dependencies": { - "call-bind": "^1.0.2", - "has-bigints": "^1.0.2", - "has-symbols": "^1.0.3", - "which-boxed-primitive": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/undici-types": { "version": "5.26.5", - "dev": true, - "license": "MIT" - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true }, "node_modules/update-browserslist-db": { "version": "1.0.13", @@ -11194,8 +6855,9 @@ }, "node_modules/uri-js": { "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "punycode": "^2.1.0" } @@ -11280,27 +6942,29 @@ }, "node_modules/util-deprecate": { "version": "1.0.2", - "dev": true, - "license": "MIT" + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", 
+ "dev": true }, "node_modules/v8-to-istanbul": { - "version": "9.0.1", + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz", + "integrity": "sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA==", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": "^2.0.1", - "convert-source-map": "^1.6.0" + "convert-source-map": "^2.0.0" }, "engines": { "node": ">=10.12.0" } }, "node_modules/vite": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.3.tgz", - "integrity": "sha512-UfmUD36DKkqhi/F75RrxvPpry+9+tTkrXfMNZD+SboZqBCMsxKtO52XeGzzuh7ioz+Eo/SYDBbdb0Z7vgcDJew==", + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.4.tgz", + "integrity": "sha512-n+MPqzq+d9nMVTKyewqw6kSt+R3CkvF9QAKY8obiQn8g1fwTscKxyfaYnC632HtBXAQGc1Yjomphwn1dtwGAHg==", "dev": true, "dependencies": { "esbuild": "^0.19.3", @@ -11353,27 +7017,47 @@ } }, "node_modules/w3c-keyname": { - "version": "2.2.6", - "license": "MIT" + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==" }, "node_modules/walker": { "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { "makeerror": "1.0.12" } }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "dev": true + }, "node_modules/whatwg-fetch": { - 
"version": "3.6.2", + "version": "3.6.20", + "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", + "dev": true + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", "dev": true, - "license": "MIT" + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } }, "node_modules/which": { "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, - "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -11384,25 +7068,11 @@ "node": ">= 8" } }, - "node_modules/which-boxed-primitive": { - "version": "1.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/wrap-ansi": { "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "ansi-styles": "^4.0.0", @@ -11418,13 +7088,15 @@ }, "node_modules/wrappy": { "version": "1.0.2", - "dev": true, - "license": "ISC" + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true }, "node_modules/write-file-atomic": { "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "imurmurhash": "^0.1.4", @@ -11436,8 +7108,9 @@ }, "node_modules/y18n": { "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", "dev": true, - "license": "ISC", "peer": true, "engines": { "node": ">=10" @@ -11445,21 +7118,15 @@ }, "node_modules/yallist": { "version": "4.0.0", - "dev": true, - "license": "ISC" - }, - "node_modules/yaml": { - "version": "1.10.2", - "dev": true, - "license": "ISC", - "engines": { - "node": ">= 6" - } + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true }, "node_modules/yargs": { - "version": "17.6.2", + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "cliui": "^8.0.1", @@ -11476,87 +7143,24 @@ }, "node_modules/yargs-parser": { "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "dev": true, - "license": "ISC", "engines": { "node": ">=12" } }, "node_modules/yocto-queue": { "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, - "license": "MIT", 
"engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } - }, - "react-app": { - "name": "@prometheus-io/app", - "version": "0.49.1", - "extraneous": true, - "dependencies": { - "@codemirror/autocomplete": "^6.11.1", - "@codemirror/commands": "^6.3.2", - "@codemirror/language": "^6.9.3", - "@codemirror/lint": "^6.4.2", - "@codemirror/search": "^6.5.5", - "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.22.1", - "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.5.1", - "@fortawesome/free-solid-svg-icons": "6.5.1", - "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.1.1", - "@lezer/highlight": "^1.2.0", - "@lezer/lr": "^1.3.14", - "@nexucis/fuzzy": "^0.4.1", - "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.49.1", - "bootstrap": "^4.6.2", - "css.escape": "^1.5.1", - "downshift": "^7.6.2", - "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.1", - "jquery.flot.tooltip": "^0.9.0", - "moment": "^2.29.4", - "moment-timezone": "^0.5.43", - "popper.js": "^1.14.3", - "react": "^17.0.2", - "react-copy-to-clipboard": "^5.1.0", - "react-dom": "^17.0.2", - "react-infinite-scroll-component": "^6.1.0", - "react-resize-detector": "^7.1.2", - "react-router-dom": "^5.3.4", - "react-test-renderer": "^17.0.2", - "reactstrap": "^8.10.1", - "sanitize-html": "^2.11.0", - "sass": "1.69.5", - "tempusdominus-bootstrap-4": "^5.39.2", - "tempusdominus-core": "^5.19.3" - }, - "devDependencies": { - "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.18", - "@types/flot": "0.0.36", - "@types/jquery": "^3.5.29", - "@types/react": "^17.0.71", - "@types/react-copy-to-clipboard": "^5.0.7", - "@types/react-dom": "^17.0.25", - "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.5", - "@types/sinon": "^10.0.20", - "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", - "enzyme": "^3.11.0", - "enzyme-to-json": "^3.6.2", - "mutationobserver-shim": "^0.3.7", - 
"sinon": "^14.0.2" - }, - "optionalDependencies": { - "fsevents": "^2.3.3" - } } } } diff --git a/web/ui/package.json b/web/ui/package.json index 10307bc807..dc230118c3 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,10 +1,12 @@ { "name": "prometheus-io", + "description": "Monorepo for the Prometheus UI", + "version": "0.49.1", "private": true, "scripts": { - "build": "GENERATE_SOURCEMAP=false bash build_ui.sh --all", + "build": "bash build_ui.sh --all", "build:module": "bash build_ui.sh --build-module", - "start": "npm run start -w mantine-ui", + "start": "npm run start -w app", "test": "npm run test --workspaces", "lint": "npm run lint --workspaces" }, @@ -12,22 +14,12 @@ "mantine-ui", "module/*" ], - "engines": { - "npm": ">=7.0.0" - }, "devDependencies": { - "@rollup/plugin-node-resolve": "^15.2.3", - "@types/jest": "^29.5.11", - "@types/node": "^20.10.4", - "eslint-config-prettier": "^8.10.0", - "eslint-config-react-app": "^7.0.1", - "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.5.2", - "jest-fetch-mock": "^3.0.3", - "prettier": "^2.8.8", - "rollup": "^4.12.0", - "ts-jest": "^29.1.1", - "typescript": "^5.3.3" - }, - "version": "0.49.1" + "@types/jest": "^29.5.12", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "ts-jest": "^29.1.2", + "typescript": "^5.2.2", + "vite": "^5.1.0" + } } diff --git a/web/ui/react-app/postcss.config.cjs b/web/ui/react-app/postcss.config.cjs new file mode 100644 index 0000000000..bfba0ddfae --- /dev/null +++ b/web/ui/react-app/postcss.config.cjs @@ -0,0 +1,14 @@ +module.exports = { + plugins: { + 'postcss-preset-mantine': {}, + 'postcss-simple-vars': { + variables: { + 'mantine-breakpoint-xs': '36em', + 'mantine-breakpoint-sm': '48em', + 'mantine-breakpoint-md': '62em', + 'mantine-breakpoint-lg': '75em', + 'mantine-breakpoint-xl': '88em', + }, + }, + }, +}; diff --git a/web/ui/react-app/src/images/prometheus_logo_grey.svg.orig 
b/web/ui/react-app/src/images/prometheus_logo_grey.svg.orig new file mode 100644 index 0000000000..b914095ecf --- /dev/null +++ b/web/ui/react-app/src/images/prometheus_logo_grey.svg.orig @@ -0,0 +1,19 @@ + + + + + + + + + + diff --git a/web/ui/react-app/src/images/prometheus_logo_grey.svg.rej b/web/ui/react-app/src/images/prometheus_logo_grey.svg.rej new file mode 100644 index 0000000000..957b7dd1ef --- /dev/null +++ b/web/ui/react-app/src/images/prometheus_logo_grey.svg.rej @@ -0,0 +1,11 @@ +--- web/ui/react-app/src/images/prometheus_logo_grey.svg ++++ web/ui/react-app/src/images/prometheus_logo_grey.svg +@@ -6,14 +6,8 @@ + + + + + + + + diff --git a/web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts b/web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts new file mode 100644 index 0000000000..4481f8f965 --- /dev/null +++ b/web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts @@ -0,0 +1,62 @@ +/** + * Inspired by a similar feature in VictoriaMetrics. + * See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3384 for more details. + * Developed by VictoriaMetrics team. 
+ */ + +import { GraphExemplar, GraphProps, GraphSeries } from './Graph'; + +export function isHistogramData(data: GraphProps['data']) { + if (!data?.result?.length) return false; + const result = data.result; + if (result.length < 2) return false; + const histogramLabels = ['le']; + + const firstLabels = Object.keys(result[0].metric).filter((n) => !histogramLabels.includes(n)); + const isHistogram = result.every((r) => { + const labels = Object.keys(r.metric).filter((n) => !histogramLabels.includes(n)); + return firstLabels.length === labels.length && labels.every((l) => r.metric[l] === result[0].metric[l]); + }); + + return isHistogram && result.every((r) => histogramLabels.some((l) => l in r.metric)); +} + +export function prepareHistogramData(buckets: GraphSeries[]) { + if (!buckets.every((a) => a.labels.le)) return buckets; + + const sortedBuckets = buckets.sort((a, b) => promValueToNumber(a.labels.le) - promValueToNumber(b.labels.le)); + const result: GraphSeries[] = []; + + for (let i = 0; i < sortedBuckets.length; i++) { + const values = []; + const { data, labels, color } = sortedBuckets[i]; + + for (const [timestamp, value] of data) { + const prevVal = sortedBuckets[i - 1]?.data.find((v) => v[0] === timestamp)?.[1] || 0; + const newVal = Number(value) - +prevVal; + values.push([Number(timestamp), newVal]); + } + + result.push({ + data: values, + labels, + color, + index: i, + }); + } + return result; +} + +export function promValueToNumber(s: string) { + switch (s) { + case 'NaN': + return NaN; + case 'Inf': + case '+Inf': + return Infinity; + case '-Inf': + return -Infinity; + default: + return parseFloat(s); + } +} From ce1ca6481e09cf405fda71d6073ab53bfd81b181 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 23 Feb 2024 14:19:46 +0100 Subject: [PATCH 0011/1397] Better navbar icon sizes Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git 
a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 4c7732bb53..04be6d57c6 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -136,7 +136,7 @@ function App() { component={NavLink} to="/graph" className={classes.link} - leftSection={} + leftSection={} > Graph @@ -144,7 +144,7 @@ function App() { component={NavLink} to="/alerts" className={classes.link} - leftSection={} + leftSection={} > Alerts @@ -162,9 +162,9 @@ function App() { to={p.path} className={classes.link} leftSection={p.icon} - rightSection={} + rightSection={} > - Status {p.title} + Status {p.title} } @@ -178,8 +178,8 @@ function App() { component={NavLink} to="/" className={classes.link} - leftSection={} - rightSection={} + leftSection={} + rightSection={} onClick={(e) => { e.preventDefault(); }} @@ -223,7 +223,7 @@ function App() { component="a" href="https://prometheus.io/docs/prometheus/latest/getting_started/" className={classes.link} - leftSection={} + leftSection={} target="_blank" > Help From 65cc7b058e311ffebf00661e2399ac70adb1a42c Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 23 Feb 2024 14:20:09 +0100 Subject: [PATCH 0012/1397] Remove index.css from initial default app Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/index.css | 68 --------------------------------- web/ui/mantine-ui/src/main.tsx | 1 - 2 files changed, 69 deletions(-) delete mode 100644 web/ui/mantine-ui/src/index.css diff --git a/web/ui/mantine-ui/src/index.css b/web/ui/mantine-ui/src/index.css deleted file mode 100644 index 6119ad9a8f..0000000000 --- a/web/ui/mantine-ui/src/index.css +++ /dev/null @@ -1,68 +0,0 @@ -:root { - font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; - line-height: 1.5; - font-weight: 400; - - color-scheme: light dark; - color: rgba(255, 255, 255, 0.87); - background-color: #242424; - - font-synthesis: none; - text-rendering: optimizeLegibility; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; 
-} - -a { - font-weight: 500; - color: #646cff; - text-decoration: inherit; -} -a:hover { - color: #535bf2; -} - -body { - margin: 0; - display: flex; - place-items: center; - min-width: 320px; - min-height: 100vh; -} - -h1 { - font-size: 3.2em; - line-height: 1.1; -} - -button { - border-radius: 8px; - border: 1px solid transparent; - padding: 0.6em 1.2em; - font-size: 1em; - font-weight: 500; - font-family: inherit; - background-color: #1a1a1a; - cursor: pointer; - transition: border-color 0.25s; -} -button:hover { - border-color: #646cff; -} -button:focus, -button:focus-visible { - outline: 4px auto -webkit-focus-ring-color; -} - -@media (prefers-color-scheme: light) { - :root { - color: #213547; - background-color: #ffffff; - } - a:hover { - color: #747bff; - } - button { - background-color: #f9f9f9; - } -} diff --git a/web/ui/mantine-ui/src/main.tsx b/web/ui/mantine-ui/src/main.tsx index 8e3f4e82d6..66054d5ea8 100644 --- a/web/ui/mantine-ui/src/main.tsx +++ b/web/ui/mantine-ui/src/main.tsx @@ -1,7 +1,6 @@ import React from "react"; import ReactDOM from "react-dom/client"; import App from "./App.tsx"; -import "./index.css"; import { Settings, SettingsContext } from "./settings.ts"; // Declared/defined in public/index.html, value replaced by Prometheus when serving bundle. 
From 128b6461e9d9a823d4cdccbeddf986953f82657d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 23 Feb 2024 17:37:56 +0100 Subject: [PATCH 0013/1397] Start working on /alerts page, factor out rule definition display Signed-off-by: Julius Volz --- .../src/api/response-types/rules.ts | 8 + web/ui/mantine-ui/src/badge.module.css | 49 +++++ web/ui/mantine-ui/src/codebox.module.css | 39 ---- web/ui/mantine-ui/src/lib/time-format.ts | 10 +- web/ui/mantine-ui/src/pages/alerts.tsx | 186 +++++++++++++++++- web/ui/mantine-ui/src/pages/rules.tsx | 135 ++----------- web/ui/mantine-ui/src/rule-definition.tsx | 105 ++++++++++ 7 files changed, 372 insertions(+), 160 deletions(-) create mode 100644 web/ui/mantine-ui/src/badge.module.css create mode 100644 web/ui/mantine-ui/src/rule-definition.tsx diff --git a/web/ui/mantine-ui/src/api/response-types/rules.ts b/web/ui/mantine-ui/src/api/response-types/rules.ts index eba15a6169..63d63d3d4b 100644 --- a/web/ui/mantine-ui/src/api/response-types/rules.ts +++ b/web/ui/mantine-ui/src/api/response-types/rules.ts @@ -46,6 +46,14 @@ interface RuleGroup { lastEvaluation: string; } +type AlertingRuleGroup = Omit & { + rules: AlertingRule[]; +}; + export interface RulesMap { groups: RuleGroup[]; } + +export interface AlertingRulesMap { + groups: AlertingRuleGroup[]; +} diff --git a/web/ui/mantine-ui/src/badge.module.css b/web/ui/mantine-ui/src/badge.module.css new file mode 100644 index 0000000000..80eb3565d2 --- /dev/null +++ b/web/ui/mantine-ui/src/badge.module.css @@ -0,0 +1,49 @@ +.statsBadge { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); + color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5)); +} + +.labelBadge { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); + color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5)); +} + +.healthOk { + background-color: light-dark( + 
var(--mantine-color-green-1), + var(--mantine-color-green-9) + ); + color: light-dark(var(--mantine-color-green-9), var(--mantine-color-green-1)); +} + +.healthErr { + background-color: light-dark( + var(--mantine-color-red-1), + darken(var(--mantine-color-red-9), 0.25) + ); + color: light-dark(var(--mantine-color-red-9), var(--mantine-color-red-1)); +} + +.healthWarn { + background-color: light-dark( + var(--mantine-color-yellow-1), + var(--mantine-color-yellow-9) + ); + color: light-dark( + var(--mantine-color-yellow-9), + var(--mantine-color-yellow-1) + ); +} + +.healthUnknown { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-gray-9) + ); +} diff --git a/web/ui/mantine-ui/src/codebox.module.css b/web/ui/mantine-ui/src/codebox.module.css index 23387c7764..d54a7d1b4f 100644 --- a/web/ui/mantine-ui/src/codebox.module.css +++ b/web/ui/mantine-ui/src/codebox.module.css @@ -4,42 +4,3 @@ var(--mantine-color-gray-9) ); } - -.statsBadge { - background-color: light-dark( - var(--mantine-color-gray-1), - var(--mantine-color-gray-9) - ); - color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5)); -} - -.labelBadge { - background-color: light-dark( - var(--mantine-color-gray-1), - var(--mantine-color-gray-9) - ); - color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5)); -} - -.healthOk { - background-color: light-dark( - var(--mantine-color-green-1), - var(--mantine-color-green-9) - ); - color: light-dark(var(--mantine-color-green-9), var(--mantine-color-green-1)); -} - -.healthErr { - background-color: light-dark( - var(--mantine-color-red-1), - var(--mantine-color-red-9) - ); - color: light-dark(var(--mantine-color-red-9), var(--mantine-color-red-1)); -} - -.healthUnknown { - background-color: light-dark( - var(--mantine-color-gray-1), - var(--mantine-color-gray-9) - ); -} diff --git a/web/ui/mantine-ui/src/lib/time-format.ts b/web/ui/mantine-ui/src/lib/time-format.ts index f31e574682..cfa5536319 
100644 --- a/web/ui/mantine-ui/src/lib/time-format.ts +++ b/web/ui/mantine-ui/src/lib/time-format.ts @@ -109,10 +109,14 @@ export const humanizeDuration = (milliseconds: number): string => { return "0s"; }; -export const formatRelative = (startStr: string, end: number): string => { +export const formatRelative = ( + startStr: string, + end: number, + suffix: string = " ago" +): string => { const start = parseTime(startStr); if (start < 0) { - return "Never"; + return "never"; } - return humanizeDuration(end - start) + " ago"; + return humanizeDuration(end - start) + suffix; }; diff --git a/web/ui/mantine-ui/src/pages/alerts.tsx b/web/ui/mantine-ui/src/pages/alerts.tsx index e5514d254b..587d3cfe68 100644 --- a/web/ui/mantine-ui/src/pages/alerts.tsx +++ b/web/ui/mantine-ui/src/pages/alerts.tsx @@ -1,3 +1,187 @@ +import { + Card, + Group, + Table, + Text, + Accordion, + Badge, + Tooltip, + Box, + Switch, +} from "@mantine/core"; +import { useSuspenseAPIQuery } from "../api/api"; +import { AlertingRulesMap } from "../api/response-types/rules"; +import badgeClasses from "../badge.module.css"; +import RuleDefinition from "../rule-definition"; +import { formatRelative, now } from "../lib/time-format"; +import { Fragment, useState } from "react"; + export default function Alerts() { - return <>Alerts page; + const { data } = useSuspenseAPIQuery(`/rules?type=alert`); + const [showAnnotations, setShowAnnotations] = useState(false); + + const ruleStatsCount = { + inactive: 0, + pending: 0, + firing: 0, + }; + + data.data.groups.forEach((el) => + el.rules.forEach((r) => ruleStatsCount[r.state]++) + ); + + return ( + <> + setShowAnnotations(event.currentTarget.checked)} + mb="md" + /> + {data.data.groups.map((g, i) => ( + + + + + {g.name} + + + {g.file} + + + + + {g.rules.map((r, j) => { + const numFiring = r.alerts.filter( + (a) => a.state === "firing" + ).length; + const numPending = r.alerts.filter( + (a) => a.state === "pending" + ).length; + + return ( + + + + {r.name} + + 
{numFiring > 0 && ( + + firing ({numFiring}) + + )} + {numPending > 0 && ( + + pending ({numPending}) + + )} + {/* {numFiring === 0 && numPending === 0 && ( + + inactive + + )} */} + + + + + + {r.alerts.length > 0 && ( + + + + Alert labels + State + Active Since + Value + + + + {r.type === "alerting" && + r.alerts.map((a, k) => ( + + + + + {Object.entries(a.labels).map( + ([k, v]) => { + return ( + + {/* TODO: Proper quote escaping */} + {k}="{v}" + + ); + } + )} + + + + + {a.state} + + + + + + {formatRelative(a.activeAt, now(), "")} + + + + {a.value} + + {showAnnotations && ( + + +
+ + {Object.entries(a.annotations).map( + ([k, v]) => ( + + {k} + {v} + + ) + )} + +
+ + + )} + + ))} + + + )} +
+
+ ); + })} +
+
+ ))} + + ); } diff --git a/web/ui/mantine-ui/src/pages/rules.tsx b/web/ui/mantine-ui/src/pages/rules.tsx index ab1e8a5075..0893dd0a2c 100644 --- a/web/ui/mantine-ui/src/pages/rules.tsx +++ b/web/ui/mantine-ui/src/pages/rules.tsx @@ -1,72 +1,45 @@ -import { - Alert, - Badge, - Card, - Group, - Table, - Text, - Tooltip, - useComputedColorScheme, -} from "@mantine/core"; +import { Alert, Badge, Card, Group, Table, Text, Tooltip } from "@mantine/core"; // import { useQuery } from "react-query"; -import { - formatDuration, - formatRelative, - humanizeDuration, - now, -} from "../lib/time-format"; +import { formatRelative, humanizeDuration, now } from "../lib/time-format"; import { IconAlertTriangle, IconBell, - IconClockPause, - IconClockPlay, IconDatabaseImport, IconHourglass, IconRefresh, IconRepeat, } from "@tabler/icons-react"; -import CodeMirror, { EditorView } from "@uiw/react-codemirror"; import { useSuspenseAPIQuery } from "../api/api"; import { RulesMap } from "../api/response-types/rules"; -import { syntaxHighlighting } from "@codemirror/language"; -import { - baseTheme, - darkPromqlHighlighter, - lightTheme, - promqlHighlighter, -} from "../codemirror/theme"; -import { PromQLExtension } from "@prometheus-io/codemirror-promql"; -import classes from "../codebox.module.css"; +import badgeClasses from "../badge.module.css"; +import RuleDefinition from "../rule-definition"; const healthBadgeClass = (state: string) => { switch (state) { case "ok": - return classes.healthOk; + return badgeClasses.healthOk; case "err": - return classes.healthErr; + return badgeClasses.healthErr; case "unknown": - return classes.healthUnknown; + return badgeClasses.healthUnknown; default: return "orange"; } }; -const promqlExtension = new PromQLExtension(); - export default function Rules() { const { data } = useSuspenseAPIQuery(`/rules`); - const theme = useComputedColorScheme(); return ( <> - {data.data.groups.map((g) => ( + {data.data.groups.map((g, i) => ( @@ -81,7 +54,7 @@ 
export default function Rules() { } > @@ -91,7 +64,7 @@ export default function Rules() { } > @@ -101,7 +74,7 @@ export default function Rules() { } > @@ -113,8 +86,9 @@ export default function Rules() { {g.rules.map((r) => ( + // TODO: Find a stable and definitely unique key. - + {r.type === "alerting" ? ( @@ -134,7 +108,7 @@ export default function Rules() { } > @@ -148,7 +122,7 @@ export default function Rules() { > } > @@ -160,31 +134,8 @@ export default function Rules() { - - - - - + + {r.lastError && ( Error: {r.lastError} )} - {r.type === "alerting" && ( - - {r.duration && ( - } - > - for: {formatDuration(r.duration * 1000)} - - )} - {r.keepFiringFor && ( - } - > - keep_firing_for: {formatDuration(r.duration * 1000)} - - )} - - )} - {r.labels && Object.keys(r.labels).length > 0 && ( - - {Object.entries(r.labels).map(([k, v]) => ( - - {k}: {v} - - ))} - - )} - {/* {Object.keys(r.annotations).length > 0 && ( - - {Object.entries(r.annotations).map(([k, v]) => ( - - {k}: {v} - - ))} - - )} */} ))} diff --git a/web/ui/mantine-ui/src/rule-definition.tsx b/web/ui/mantine-ui/src/rule-definition.tsx new file mode 100644 index 0000000000..8f426fd82d --- /dev/null +++ b/web/ui/mantine-ui/src/rule-definition.tsx @@ -0,0 +1,105 @@ +import { + Alert, + Badge, + Card, + Group, + useComputedColorScheme, +} from "@mantine/core"; +import { + IconAlertTriangle, + IconClockPause, + IconClockPlay, +} from "@tabler/icons-react"; +import { FC } from "react"; +import { formatDuration } from "./lib/time-format"; +import codeboxClasses from "./codebox.module.css"; +import badgeClasses from "./badge.module.css"; +import { Rule } from "./api/response-types/rules"; +import CodeMirror, { EditorView } from "@uiw/react-codemirror"; +import { syntaxHighlighting } from "@codemirror/language"; +import { + baseTheme, + darkPromqlHighlighter, + lightTheme, + promqlHighlighter, +} from "./codemirror/theme"; +import { PromQLExtension } from "@prometheus-io/codemirror-promql"; + +const 
promqlExtension = new PromQLExtension(); + +const RuleDefinition: FC<{ rule: Rule }> = ({ rule }) => { + const theme = useComputedColorScheme(); + + return ( + <> + + + + {rule.type === "alerting" && ( + + {rule.duration && ( + } + > + for: {formatDuration(rule.duration * 1000)} + + )} + {rule.keepFiringFor && ( + } + > + keep_firing_for: {formatDuration(rule.duration * 1000)} + + )} + + )} + {rule.labels && Object.keys(rule.labels).length > 0 && ( + + {Object.entries(rule.labels).map(([k, v]) => ( + + {k}: {v} + + ))} + + )} + {/* {Object.keys(r.annotations).length > 0 && ( + + {Object.entries(r.annotations).map(([k, v]) => ( + + {k}: {v} + + ))} + + )} */} + + ); +}; + +export default RuleDefinition; From 89ecb3a3f2e48a66260a9af1a834ed8617b86f27 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 23 Feb 2024 17:38:25 +0100 Subject: [PATCH 0014/1397] Add input field to /graph page. Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/graph.module.css | 14 +++++++++++ web/ui/mantine-ui/src/pages/graph.tsx | 26 +++++++++++++++++++- 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 web/ui/mantine-ui/src/pages/graph.module.css diff --git a/web/ui/mantine-ui/src/pages/graph.module.css b/web/ui/mantine-ui/src/pages/graph.module.css new file mode 100644 index 0000000000..c42dfcb0ec --- /dev/null +++ b/web/ui/mantine-ui/src/pages/graph.module.css @@ -0,0 +1,14 @@ +.input { + font-family: "DejaVu Sans Mono"; + padding-top: 7px; + transition: none; + + &:focus-within { + outline: rem(2px) solid var(--mantine-color-blue-filled); + border-color: transparent; + } + + &:placeholder-shown { + font-family: unset; + } +} diff --git a/web/ui/mantine-ui/src/pages/graph.tsx b/web/ui/mantine-ui/src/pages/graph.tsx index 1e1c158139..8bec399afb 100644 --- a/web/ui/mantine-ui/src/pages/graph.tsx +++ b/web/ui/mantine-ui/src/pages/graph.tsx @@ -1,3 +1,27 @@ +import { Group, Textarea, Button } from "@mantine/core"; +import { IconTerminal } from 
"@tabler/icons-react"; +import { useState } from "react"; +import classes from "./graph.module.css"; + export default function Graph() { - return <>Graph page; + const [expr, setExpr] = useState(""); + + return ( + + ",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"
","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function qe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function Le(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function He(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Oe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Ut,Xt=[],Vt=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Xt.pop()||S.expando+"_"+Ct.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Vt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Vt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Vt,"$1"+r):!1!==e.jsonp&&(e.url+=(Et.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Xt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Ut=E.implementation.createHTMLDocument("").body).innerHTML="
",2===Ut.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):("number"==typeof f.top&&(f.top+="px"),"number"==typeof f.left&&(f.left+="px"),c.css(f))}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=$e(y.pixelPosition,function(e,t){if(t)return t=Be(e,n),Me.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0", "/": "?", "\\": "|" - } - }; - - function keyHandler( handleObj ) { - - var origHandler = handleObj.handler, - //use namespace as keys so it works with event delegation as well - //will also allow removing listeners of a specific key combination - //and support data objects - keys = (handleObj.namespace || "").toLowerCase().split(" "); - keys = jQuery.map(keys, function(key) { return key.split("."); }); - - //no need to modify handler if no keys specified - //Added keys[0].substring(0, 12) to work with jQuery ui 1.9.0 - //Added accordion, tabs and menu, then jquery ui can use keys. - - if (keys.length === 1 && (keys[0] === "" || - keys[0].substring(0, 12) === "autocomplete" || - keys[0].substring(0, 9) === "accordion" || - keys[0].substring(0, 4) === "tabs" || - keys[0].substring(0, 4) === "menu")) { - return; - } - - handleObj.handler = function( event ) { - // Don't fire in text-accepting inputs that we didn't directly bind to - // important to note that $.fn.prop is only available on jquery 1.6+ - if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) || - event.target.type === "text" || $(event.target).prop('contenteditable') == 'true' )) { - return; - } - - // Keypress represents characters, not special keys - var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ], - character = String.fromCharCode( event.which ).toLowerCase(), - key, modif = "", possible = {}; - - // check combinations (alt|ctrl|shift+anything) - if ( event.altKey && special !== "alt" ) { - modif += "alt_"; - } - - if ( event.ctrlKey && special !== "ctrl" ) { - modif += 
"ctrl_"; - } - - // TODO: Need to make sure this works consistently across platforms - if ( event.metaKey && !event.ctrlKey && special !== "meta" ) { - modif += "meta_"; - } - - if ( event.shiftKey && special !== "shift" ) { - modif += "shift_"; - } - - if ( special ) { - possible[ modif + special ] = true; - - } else { - possible[ modif + character ] = true; - possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true; - - // "$" can be triggered as "Shift+4" or "Shift+$" or just "$" - if ( modif === "shift_" ) { - possible[ jQuery.hotkeys.shiftNums[ character ] ] = true; - } - } - - for ( var i = 0, l = keys.length; i < l; i++ ) { - if ( possible[ keys[i] ] ) { - return origHandler.apply( this, arguments ); - } - } - }; - } - - jQuery.each([ "keydown", "keyup", "keypress" ], function() { - jQuery.event.special[ this ] = { add: keyHandler }; - }); - -})( jQuery ); \ No newline at end of file diff --git a/web/ui/static/vendor/js/jquery.selection.js b/web/ui/static/vendor/js/jquery.selection.js deleted file mode 100644 index 6ffb878b5a..0000000000 --- a/web/ui/static/vendor/js/jquery.selection.js +++ /dev/null @@ -1,354 +0,0 @@ -/*! - * jQuery.selection - jQuery Plugin - * - * Copyright (c) 2010-2014 IWASAKI Koji (@madapaja). - * http://blog.madapaja.net/ - * Under The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -(function($, win, doc) { - /** - * get caret status of the selection of the element - * - * @param {Element} element target DOM element - * @return {Object} return - * @return {String} return.text selected text - * @return {Number} return.start start position of the selection - * @return {Number} return.end end position of the selection - */ - var _getCaretInfo = function(element){ - var res = { - text: '', - start: 0, - end: 0 - }; - - if (!element.value) { - /* no value or empty string */ - return res; - } - - try { - if (win.getSelection) { - /* except IE */ - res.start = element.selectionStart; - res.end = element.selectionEnd; - res.text = element.value.slice(res.start, res.end); - } else if (doc.selection) { - /* for IE */ - element.focus(); - - var range = doc.selection.createRange(), - range2 = doc.body.createTextRange(); - - res.text = range.text; - - try { - range2.moveToElementText(element); - range2.setEndPoint('StartToStart', range); - } catch (e) { - range2 = element.createTextRange(); - range2.setEndPoint('StartToStart', range); - } - - res.start = element.value.length - range2.text.length; - res.end = res.start + range.text.length; - } - } catch (e) { - /* give up */ - } - - return res; - }; - - /** - * caret operation for the element - * @type {Object} - */ - var _CaretOperation = { - /** - * get caret position - * - * @param {Element} element target element - * @return {Object} return - * @return {Number} return.start start position for the selection - * 
@return {Number} return.end end position for the selection - */ - getPos: function(element) { - var tmp = _getCaretInfo(element); - return {start: tmp.start, end: tmp.end}; - }, - - /** - * set caret position - * - * @param {Element} element target element - * @param {Object} toRange caret position - * @param {Number} toRange.start start position for the selection - * @param {Number} toRange.end end position for the selection - * @param {String} caret caret mode: any of the following: "keep" | "start" | "end" - */ - setPos: function(element, toRange, caret) { - caret = this._caretMode(caret); - - if (caret === 'start') { - toRange.end = toRange.start; - } else if (caret === 'end') { - toRange.start = toRange.end; - } - - element.focus(); - try { - if (element.createTextRange) { - var range = element.createTextRange(); - - if (win.navigator.userAgent.toLowerCase().indexOf("msie") >= 0) { - toRange.start = element.value.substr(0, toRange.start).replace(/\r/g, '').length; - toRange.end = element.value.substr(0, toRange.end).replace(/\r/g, '').length; - } - - range.collapse(true); - range.moveStart('character', toRange.start); - range.moveEnd('character', toRange.end - toRange.start); - - range.select(); - } else if (element.setSelectionRange) { - element.setSelectionRange(toRange.start, toRange.end); - } - } catch (e) { - /* give up */ - } - }, - - /** - * get selected text - * - * @param {Element} element target element - * @return {String} return selected text - */ - getText: function(element) { - return _getCaretInfo(element).text; - }, - - /** - * get caret mode - * - * @param {String} caret caret mode - * @return {String} return any of the following: "keep" | "start" | "end" - */ - _caretMode: function(caret) { - caret = caret || "keep"; - if (caret === false) { - caret = 'end'; - } - - switch (caret) { - case 'keep': - case 'start': - case 'end': - break; - - default: - caret = 'keep'; - } - - return caret; - }, - - /** - * replace selected text - * - * @param 
{Element} element target element - * @param {String} text replacement text - * @param {String} caret caret mode: any of the following: "keep" | "start" | "end" - */ - replace: function(element, text, caret) { - var tmp = _getCaretInfo(element), - orig = element.value, - pos = $(element).scrollTop(), - range = {start: tmp.start, end: tmp.start + text.length}; - - element.value = orig.substr(0, tmp.start) + text + orig.substr(tmp.end); - - $(element).scrollTop(pos); - this.setPos(element, range, caret); - }, - - /** - * insert before the selected text - * - * @param {Element} element target element - * @param {String} text insertion text - * @param {String} caret caret mode: any of the following: "keep" | "start" | "end" - */ - insertBefore: function(element, text, caret) { - var tmp = _getCaretInfo(element), - orig = element.value, - pos = $(element).scrollTop(), - range = {start: tmp.start + text.length, end: tmp.end + text.length}; - - element.value = orig.substr(0, tmp.start) + text + orig.substr(tmp.start); - - $(element).scrollTop(pos); - this.setPos(element, range, caret); - }, - - /** - * insert after the selected text - * - * @param {Element} element target element - * @param {String} text insertion text - * @param {String} caret caret mode: any of the following: "keep" | "start" | "end" - */ - insertAfter: function(element, text, caret) { - var tmp = _getCaretInfo(element), - orig = element.value, - pos = $(element).scrollTop(), - range = {start: tmp.start, end: tmp.end}; - - element.value = orig.substr(0, tmp.end) + text + orig.substr(tmp.end); - - $(element).scrollTop(pos); - this.setPos(element, range, caret); - } - }; - - /* add jQuery.selection */ - $.extend({ - /** - * get selected text on the window - * - * @param {String} mode selection mode: any of the following: "text" | "html" - * @return {String} return - */ - selection: function(mode) { - var getText = ((mode || 'text').toLowerCase() === 'text'); - - try { - if (win.getSelection) { - if 
(getText) { - // get text - return win.getSelection().toString(); - } else { - // get html - var sel = win.getSelection(), range; - - if (sel.getRangeAt) { - range = sel.getRangeAt(0); - } else { - range = doc.createRange(); - range.setStart(sel.anchorNode, sel.anchorOffset); - range.setEnd(sel.focusNode, sel.focusOffset); - } - - return $('
').append(range.cloneContents()).html(); - } - } else if (doc.selection) { - if (getText) { - // get text - return doc.selection.createRange().text; - } else { - // get html - return doc.selection.createRange().htmlText; - } - } - } catch (e) { - /* give up */ - } - - return ''; - } - }); - - /* add selection */ - $.fn.extend({ - selection: function(mode, opts) { - opts = opts || {}; - - switch (mode) { - /** - * selection('getPos') - * get caret position - * - * @return {Object} return - * @return {Number} return.start start position for the selection - * @return {Number} return.end end position for the selection - */ - case 'getPos': - return _CaretOperation.getPos(this[0]); - - /** - * selection('setPos', opts) - * set caret position - * - * @param {Number} opts.start start position for the selection - * @param {Number} opts.end end position for the selection - */ - case 'setPos': - return this.each(function() { - _CaretOperation.setPos(this, opts); - }); - - /** - * selection('replace', opts) - * replace the selected text - * - * @param {String} opts.text replacement text - * @param {String} opts.caret caret mode: any of the following: "keep" | "start" | "end" - */ - case 'replace': - return this.each(function() { - _CaretOperation.replace(this, opts.text, opts.caret); - }); - - /** - * selection('insert', opts) - * insert before/after the selected text - * - * @param {String} opts.text insertion text - * @param {String} opts.caret caret mode: any of the following: "keep" | "start" | "end" - * @param {String} opts.mode insertion mode: any of the following: "before" | "after" - */ - case 'insert': - return this.each(function() { - if (opts.mode === 'before') { - _CaretOperation.insertBefore(this, opts.text, opts.caret); - } else { - _CaretOperation.insertAfter(this, opts.text, opts.caret); - } - }); - - /** - * selection('get') - * get selected text - * - * @return {String} return - */ - case 'get': - /* falls through */ - default: - return 
_CaretOperation.getText(this[0]); - } - - return this; - } - }); -})(jQuery, window, window.document); diff --git a/web/ui/static/vendor/js/popper.min.js b/web/ui/static/vendor/js/popper.min.js deleted file mode 100644 index 4553f6578d..0000000000 --- a/web/ui/static/vendor/js/popper.min.js +++ /dev/null @@ -1,5 +0,0 @@ -/* - Copyright (C) Federico Zivolo 2017 - Distributed under the MIT License (license terms are at http://opensource.org/licenses/MIT). - */(function(e,t){'object'==typeof exports&&'undefined'!=typeof module?module.exports=t():'function'==typeof define&&define.amd?define(t):e.Popper=t()})(this,function(){'use strict';function e(e){return e&&'[object Function]'==={}.toString.call(e)}function t(e,t){if(1!==e.nodeType)return[];var o=window.getComputedStyle(e,null);return t?o[t]:o}function o(e){return'HTML'===e.nodeName?e:e.parentNode||e.host}function n(e){if(!e||-1!==['HTML','BODY','#document'].indexOf(e.nodeName))return window.document.body;var i=t(e),r=i.overflow,p=i.overflowX,s=i.overflowY;return /(auto|scroll)/.test(r+s+p)?e:n(o(e))}function r(e){var o=e&&e.offsetParent,i=o&&o.nodeName;return i&&'BODY'!==i&&'HTML'!==i?-1!==['TD','TABLE'].indexOf(o.nodeName)&&'static'===t(o,'position')?r(o):o:window.document.documentElement}function p(e){var t=e.nodeName;return'BODY'!==t&&('HTML'===t||r(e.firstElementChild)===e)}function s(e){return null===e.parentNode?e:s(e.parentNode)}function d(e,t){if(!e||!e.nodeType||!t||!t.nodeType)return window.document.documentElement;var o=e.compareDocumentPosition(t)&Node.DOCUMENT_POSITION_FOLLOWING,i=o?e:t,n=o?t:e,a=document.createRange();a.setStart(i,0),a.setEnd(n,0);var l=a.commonAncestorContainer;if(e!==l&&t!==l||i.contains(n))return p(l)?l:r(l);var f=s(e);return f.host?d(f.host,t):d(e,s(t).host)}function a(e){var t=1=o.clientWidth&&i>=o.clientHeight}),l=0i[e]&&!t.escapeWithReference&&(n=V(p[o],i[e]-('right'===e?p.width:p.height))),se({},o,n)}};return n.forEach(function(e){var 
t=-1===['left','top'].indexOf(e)?'secondary':'primary';p=de({},p,s[t](e))}),e.offsets.popper=p,e},priority:['left','right','top','bottom'],padding:5,boundariesElement:'scrollParent'},keepTogether:{order:400,enabled:!0,fn:function(e){var t=e.offsets,o=t.popper,i=t.reference,n=e.placement.split('-')[0],r=_,p=-1!==['top','bottom'].indexOf(n),s=p?'right':'bottom',d=p?'left':'top',a=p?'width':'height';return o[s]r(i[s])&&(e.offsets.popper[d]=r(i[s])),e}},arrow:{order:500,enabled:!0,fn:function(e,o){if(!F(e.instance.modifiers,'arrow','keepTogether'))return e;var i=o.element;if('string'==typeof i){if(i=e.instance.popper.querySelector(i),!i)return e;}else if(!e.instance.popper.contains(i))return console.warn('WARNING: `arrow.element` must be child of its popper element!'),e;var n=e.placement.split('-')[0],r=e.offsets,p=r.popper,s=r.reference,d=-1!==['left','right'].indexOf(n),a=d?'height':'width',l=d?'Top':'Left',f=l.toLowerCase(),m=d?'left':'top',c=d?'bottom':'right',g=O(i)[a];s[c]-gp[c]&&(e.offsets.popper[f]+=s[f]+g-p[c]);var u=s[f]+s[a]/2-g/2,b=t(e.instance.popper,'margin'+l).replace('px',''),y=u-h(e.offsets.popper)[f]-b;return y=X(V(p[a]-g,y),0),e.arrowElement=i,e.offsets.arrow={},e.offsets.arrow[f]=Math.round(y),e.offsets.arrow[m]='',e},element:'[x-arrow]'},flip:{order:600,enabled:!0,fn:function(e,t){if(W(e.instance.modifiers,'inner'))return e;if(e.flipped&&e.placement===e.originalPlacement)return e;var o=w(e.instance.popper,e.instance.reference,t.padding,t.boundariesElement),i=e.placement.split('-')[0],n=L(i),r=e.placement.split('-')[1]||'',p=[];switch(t.behavior){case fe.FLIP:p=[i,n];break;case fe.CLOCKWISE:p=K(i);break;case fe.COUNTERCLOCKWISE:p=K(i,!0);break;default:p=t.behavior;}return p.forEach(function(s,d){if(i!==s||p.length===d+1)return e;i=e.placement.split('-')[0],n=L(i);var 
a=e.offsets.popper,l=e.offsets.reference,f=_,m='left'===i&&f(a.right)>f(l.left)||'right'===i&&f(a.left)f(l.top)||'bottom'===i&&f(a.top)f(o.right),g=f(a.top)f(o.bottom),b='left'===i&&c||'right'===i&&h||'top'===i&&g||'bottom'===i&&u,y=-1!==['top','bottom'].indexOf(i),w=!!t.flipVariations&&(y&&'start'===r&&c||y&&'end'===r&&h||!y&&'start'===r&&g||!y&&'end'===r&&u);(m||b||w)&&(e.flipped=!0,(m||b)&&(i=p[d+1]),w&&(r=j(r)),e.placement=i+(r?'-'+r:''),e.offsets.popper=de({},e.offsets.popper,S(e.instance.popper,e.offsets.reference,e.placement)),e=N(e.instance.modifiers,e,'flip'))}),e},behavior:'flip',padding:5,boundariesElement:'viewport'},inner:{order:700,enabled:!1,fn:function(e){var t=e.placement,o=t.split('-')[0],i=e.offsets,n=i.popper,r=i.reference,p=-1!==['left','right'].indexOf(o),s=-1===['top','left'].indexOf(o);return n[p?'left':'top']=r[o]-(s?n[p?'width':'height']:0),e.placement=L(t),e.offsets.popper=h(n),e}},hide:{order:800,enabled:!0,fn:function(e){if(!F(e.instance.modifiers,'hide','preventOverflow'))return e;var t=e.offsets.reference,o=T(e.instance.modifiers,function(e){return'preventOverflow'===e.name}).boundaries;if(t.bottomo.right||t.top>o.bottom||t.right0){var x=s.data[0].x;var y=s.data[0].y;if(typeof x!="number"||typeof y!="number"&&y!==null){throw"x and y properties of points should be numbers instead of "+typeof x+" and "+typeof y}}if(s.data.length>=3){if(s.data[2].xthis.window.xMax)isInRange=false;return isInRange}return true};this.onUpdate=function(callback){this.updateCallbacks.push(callback)};this.onConfigure=function(callback){this.configureCallbacks.push(callback)};this.registerRenderer=function(renderer){this._renderers=this._renderers||{};this._renderers[renderer.name]=renderer};this.configure=function(args){this.config=this.config||{};if(args.width||args.height){this.setSize(args)}Rickshaw.keys(this.defaults).forEach(function(k){this.config[k]=k in args?args[k]:k in 
this?this[k]:this.defaults[k]},this);Rickshaw.keys(this.config).forEach(function(k){this[k]=this.config[k]},this);if("stack"in args)args.unstack=!args.stack;var renderer=args.renderer||this.renderer&&this.renderer.name||"stack";this.setRenderer(renderer,args);this.configureCallbacks.forEach(function(callback){callback(args)})};this.setRenderer=function(r,args){if(typeof r=="function"){this.renderer=new r({graph:self});this.registerRenderer(this.renderer)}else{if(!this._renderers[r]){throw"couldn't find renderer "+r}this.renderer=this._renderers[r]}if(typeof args=="object"){this.renderer.configure(args)}};this.setSize=function(args){args=args||{};if(typeof window!==undefined){var style=window.getComputedStyle(this.element,null);var elementWidth=parseInt(style.getPropertyValue("width"),10);var elementHeight=parseInt(style.getPropertyValue("height"),10)}this.width=args.width||elementWidth||400;this.height=args.height||elementHeight||250;this.vis&&this.vis.attr("width",this.width).attr("height",this.height)};this.initialize(args)};Rickshaw.namespace("Rickshaw.Fixtures.Color");Rickshaw.Fixtures.Color=function(){this.schemes={};this.schemes.spectrum14=["#ecb796","#dc8f70","#b2a470","#92875a","#716c49","#d2ed82","#bbe468","#a1d05d","#e7cbe6","#d8aad6","#a888c2","#9dc2d3","#649eb9","#387aa3"].reverse();this.schemes.spectrum2000=["#57306f","#514c76","#646583","#738394","#6b9c7d","#84b665","#a7ca50","#bfe746","#e2f528","#fff726","#ecdd00","#d4b11d","#de8800","#de4800","#c91515","#9a0000","#7b0429","#580839","#31082b"];this.schemes.spectrum2001=["#2f243f","#3c2c55","#4a3768","#565270","#6b6b7c","#72957f","#86ad6e","#a1bc5e","#b8d954","#d3e04e","#ccad2a","#cc8412","#c1521d","#ad3821","#8a1010","#681717","#531e1e","#3d1818","#320a1b"];this.schemes.classic9=["#423d4f","#4a6860","#848f39","#a2b73c","#ddcb53","#c5a32f","#7d5836","#963b20","#7c2626","#491d37","#2f254a"].reverse();this.schemes.httpStatus={503:"#ea5029",502:"#d23f14",500:"#bf3613",410:"#efacea",409:"#e291dc",403:"#f45
7e8",408:"#e121d2",401:"#b92dae",405:"#f47ceb",404:"#a82a9f",400:"#b263c6",301:"#6fa024",302:"#87c32b",307:"#a0d84c",304:"#28b55c",200:"#1a4f74",206:"#27839f",201:"#52adc9",202:"#7c979f",203:"#a5b8bd",204:"#c1cdd1"};this.schemes.colorwheel=["#b5b6a9","#858772","#785f43","#96557e","#4682b4","#65b9ac","#73c03a","#cb513a"].reverse();this.schemes.cool=["#5e9d2f","#73c03a","#4682b4","#7bc3b8","#a9884e","#c1b266","#a47493","#c09fb5"];this.schemes.munin=["#00cc00","#0066b3","#ff8000","#ffcc00","#330099","#990099","#ccff00","#ff0000","#808080","#008f00","#00487d","#b35a00","#b38f00","#6b006b","#8fb300","#b30000","#bebebe","#80ff80","#80c9ff","#ffc080","#ffe680","#aa80ff","#ee00cc","#ff8080","#666600","#ffbfff","#00ffcc","#cc6699","#999900"]};Rickshaw.namespace("Rickshaw.Fixtures.RandomData");Rickshaw.Fixtures.RandomData=function(timeInterval){var addData;timeInterval=timeInterval||1;var lastRandomValue=200;var timeBase=Math.floor((new Date).getTime()/1e3);this.addData=function(data){var randomValue=Math.random()*100+15+lastRandomValue;var index=data[0].length;var counter=1;data.forEach(function(series){var randomVariance=Math.random()*20;var v=randomValue/25+counter++ +(Math.cos(index*counter*11/960)+2)*15+(Math.cos(index/7)+2)*7+(Math.cos(index/17)+2)*1;series.push({x:index*timeInterval+timeBase,y:v+randomVariance})});lastRandomValue=randomValue*.85};this.removeData=function(data){data.forEach(function(series){series.shift()});timeBase+=timeInterval}};Rickshaw.namespace("Rickshaw.Fixtures.Time");Rickshaw.Fixtures.Time=function(){var self=this;this.months=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];this.units=[{name:"decade",seconds:86400*365.25*10,formatter:function(d){return parseInt(d.getUTCFullYear()/10,10)*10}},{name:"year",seconds:86400*365.25,formatter:function(d){return d.getUTCFullYear()}},{name:"month",seconds:86400*30.5,formatter:function(d){return 
self.months[d.getUTCMonth()]}},{name:"week",seconds:86400*7,formatter:function(d){return self.formatDate(d)}},{name:"day",seconds:86400,formatter:function(d){return d.getUTCDate()}},{name:"6 hour",seconds:3600*6,formatter:function(d){return self.formatTime(d)}},{name:"hour",seconds:3600,formatter:function(d){return self.formatTime(d)}},{name:"15 minute",seconds:60*15,formatter:function(d){return self.formatTime(d)}},{name:"minute",seconds:60,formatter:function(d){return d.getUTCMinutes()}},{name:"15 second",seconds:15,formatter:function(d){return d.getUTCSeconds()+"s"}},{name:"second",seconds:1,formatter:function(d){return d.getUTCSeconds()+"s"}},{name:"decisecond",seconds:1/10,formatter:function(d){return d.getUTCMilliseconds()+"ms"}},{name:"centisecond",seconds:1/100,formatter:function(d){return d.getUTCMilliseconds()+"ms"}}];this.unit=function(unitName){return this.units.filter(function(unit){return unitName==unit.name}).shift()};this.formatDate=function(d){return d3.time.format("%b %e")(d)};this.formatTime=function(d){return d.toUTCString().match(/(\d+:\d+):/)[1]};this.ceil=function(time,unit){var date,floor,year;if(unit.name=="month"){date=new Date(time*1e3);floor=Date.UTC(date.getUTCFullYear(),date.getUTCMonth())/1e3;if(floor==time)return time;year=date.getUTCFullYear();var month=date.getUTCMonth();if(month==11){month=0;year=year+1}else{month+=1}return Date.UTC(year,month)/1e3}if(unit.name=="year"){date=new Date(time*1e3);floor=Date.UTC(date.getUTCFullYear(),0)/1e3;if(floor==time)return time;year=date.getUTCFullYear()+1;return Date.UTC(year,0)/1e3}return Math.ceil(time/unit.seconds)*unit.seconds}};Rickshaw.namespace("Rickshaw.Fixtures.Time.Local");Rickshaw.Fixtures.Time.Local=function(){var self=this;this.months=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];this.units=[{name:"decade",seconds:86400*365.25*10,formatter:function(d){return 
parseInt(d.getFullYear()/10,10)*10}},{name:"year",seconds:86400*365.25,formatter:function(d){return d.getFullYear()}},{name:"month",seconds:86400*30.5,formatter:function(d){return self.months[d.getMonth()]}},{name:"week",seconds:86400*7,formatter:function(d){return self.formatDate(d)}},{name:"day",seconds:86400,formatter:function(d){return d.getDate()}},{name:"6 hour",seconds:3600*6,formatter:function(d){return self.formatTime(d)}},{name:"hour",seconds:3600,formatter:function(d){return self.formatTime(d)}},{name:"15 minute",seconds:60*15,formatter:function(d){return self.formatTime(d)}},{name:"minute",seconds:60,formatter:function(d){return d.getMinutes()}},{name:"15 second",seconds:15,formatter:function(d){return d.getSeconds()+"s"}},{name:"second",seconds:1,formatter:function(d){return d.getSeconds()+"s"}},{name:"decisecond",seconds:1/10,formatter:function(d){return d.getMilliseconds()+"ms"}},{name:"centisecond",seconds:1/100,formatter:function(d){return d.getMilliseconds()+"ms"}}];this.unit=function(unitName){return this.units.filter(function(unit){return unitName==unit.name}).shift()};this.formatDate=function(d){return d3.time.format("%b %e")(d)};this.formatTime=function(d){return d.toString().match(/(\d+:\d+):/)[1]};this.ceil=function(time,unit){var date,floor,year;if(unit.name=="day"){var nearFuture=new Date((time+unit.seconds-1)*1e3);var rounded=new Date(0);rounded.setMilliseconds(0);rounded.setSeconds(0);rounded.setMinutes(0);rounded.setHours(0);rounded.setDate(nearFuture.getDate());rounded.setMonth(nearFuture.getMonth());rounded.setFullYear(nearFuture.getFullYear());return rounded.getTime()/1e3}if(unit.name=="month"){date=new Date(time*1e3);floor=new Date(date.getFullYear(),date.getMonth()).getTime()/1e3;if(floor==time)return time;year=date.getFullYear();var month=date.getMonth();if(month==11){month=0;year=year+1}else{month+=1}return new Date(year,month).getTime()/1e3}if(unit.name=="year"){date=new Date(time*1e3);floor=new 
Date(date.getUTCFullYear(),0).getTime()/1e3;if(floor==time)return time;year=date.getFullYear()+1;return new Date(year,0).getTime()/1e3}return Math.ceil(time/unit.seconds)*unit.seconds}};Rickshaw.namespace("Rickshaw.Fixtures.Number");Rickshaw.Fixtures.Number.formatKMBT=function(y){var abs_y=Math.abs(y);if(abs_y>=1e12){return y/1e12+"T"}else if(abs_y>=1e9){return y/1e9+"B"}else if(abs_y>=1e6){return y/1e6+"M"}else if(abs_y>=1e3){return y/1e3+"K"}else if(abs_y<1&&y>0){return y.toFixed(2)}else if(abs_y===0){return""}else{return y}};Rickshaw.Fixtures.Number.formatBase1024KMGTP=function(y){var abs_y=Math.abs(y);if(abs_y>=0x4000000000000){return y/0x4000000000000+"P"}else if(abs_y>=1099511627776){return y/1099511627776+"T"}else if(abs_y>=1073741824){return y/1073741824+"G"}else if(abs_y>=1048576){return y/1048576+"M"}else if(abs_y>=1024){return y/1024+"K"}else if(abs_y<1&&y>0){return y.toFixed(2)}else if(abs_y===0){return""}else{return y}};Rickshaw.namespace("Rickshaw.Color.Palette");Rickshaw.Color.Palette=function(args){var color=new Rickshaw.Fixtures.Color;args=args||{};this.schemes={};this.scheme=color.schemes[args.scheme]||args.scheme||color.schemes.colorwheel;this.runningIndex=0;this.generatorIndex=0;if(args.interpolatedStopCount){var schemeCount=this.scheme.length-1;var i,j,scheme=[];for(i=0;iself.graph.x.range()[1]){if(annotation.element){annotation.line.classList.add("offscreen");annotation.element.style.display="none"}annotation.boxes.forEach(function(box){if(box.rangeElement)box.rangeElement.classList.add("offscreen")});return}if(!annotation.element){var 
element=annotation.element=document.createElement("div");element.classList.add("annotation");this.elements.timeline.appendChild(element);element.addEventListener("click",function(e){element.classList.toggle("active");annotation.line.classList.toggle("active");annotation.boxes.forEach(function(box){if(box.rangeElement)box.rangeElement.classList.toggle("active")})},false)}annotation.element.style.left=left+"px";annotation.element.style.display="block";annotation.boxes.forEach(function(box){var element=box.element;if(!element){element=box.element=document.createElement("div");element.classList.add("content");element.innerHTML=box.content;annotation.element.appendChild(element);annotation.line=document.createElement("div");annotation.line.classList.add("annotation_line");self.graph.element.appendChild(annotation.line);if(box.end){box.rangeElement=document.createElement("div");box.rangeElement.classList.add("annotation_range");self.graph.element.appendChild(box.rangeElement)}}if(box.end){var annotationRangeStart=left;var annotationRangeEnd=Math.min(self.graph.x(box.end),self.graph.x.range()[1]);if(annotationRangeStart>annotationRangeEnd){annotationRangeEnd=left;annotationRangeStart=Math.max(self.graph.x(box.end),self.graph.x.range()[0])}var annotationRangeWidth=annotationRangeEnd-annotationRangeStart;box.rangeElement.style.left=annotationRangeStart+"px";box.rangeElement.style.width=annotationRangeWidth+"px";box.rangeElement.classList.remove("offscreen")}annotation.line.classList.remove("offscreen");annotation.line.style.left=left+"px"})},this)};this.graph.onUpdate(function(){self.update()})};Rickshaw.namespace("Rickshaw.Graph.Axis.Time");Rickshaw.Graph.Axis.Time=function(args){var self=this;this.graph=args.graph;this.elements=[];this.ticksTreatment=args.ticksTreatment||"plain";this.fixedTimeUnit=args.timeUnit;var time=args.timeFixture||new Rickshaw.Fixtures.Time;this.appropriateTimeUnit=function(){var unit;var units=time.units;var domain=this.graph.x.domain();var 
rangeSeconds=domain[1]-domain[0];units.forEach(function(u){if(Math.floor(rangeSeconds/u.seconds)>=2){unit=unit||u}});return unit||time.units[time.units.length-1]};this.tickOffsets=function(){var domain=this.graph.x.domain();var unit=this.fixedTimeUnit||this.appropriateTimeUnit();var count=Math.ceil((domain[1]-domain[0])/unit.seconds);var runningTick=domain[0];var offsets=[];for(var i=0;iself.graph.x.range()[1])return;var element=document.createElement("div");element.style.left=self.graph.x(o.value)+"px";element.classList.add("x_tick");element.classList.add(self.ticksTreatment);var title=document.createElement("div");title.classList.add("title");title.innerHTML=o.unit.formatter(new Date(o.value*1e3));element.appendChild(title);self.graph.element.appendChild(element);self.elements.push(element)})};this.graph.onUpdate(function(){self.render()})};Rickshaw.namespace("Rickshaw.Graph.Axis.X");Rickshaw.Graph.Axis.X=function(args){var self=this;var berthRate=.1;this.initialize=function(args){this.graph=args.graph;this.orientation=args.orientation||"top";this.pixelsPerTick=args.pixelsPerTick||75;if(args.ticks)this.staticTicks=args.ticks;if(args.tickValues)this.tickValues=args.tickValues;this.tickSize=args.tickSize||4;this.ticksTreatment=args.ticksTreatment||"plain";if(args.element){this.element=args.element;this._discoverSize(args.element,args);this.vis=d3.select(args.element).append("svg:svg").attr("height",this.height).attr("width",this.width).attr("class","rickshaw_graph x_axis_d3");this.element=this.vis[0][0];this.element.style.position="relative";this.setSize({width:args.width,height:args.height})}else{this.vis=this.graph.vis}this.graph.onUpdate(function(){self.render()})};this.setSize=function(args){args=args||{};if(!this.element)return;this._discoverSize(this.element.parentNode,args);this.vis.attr("height",this.height).attr("width",this.width*(1+berthRate));var 
berth=Math.floor(this.width*berthRate/2);this.element.style.left=-1*berth+"px"};this.render=function(){if(this._renderWidth!==undefined&&this.graph.width!==this._renderWidth)this.setSize({auto:true});var axis=d3.svg.axis().scale(this.graph.x).orient(this.orientation);axis.tickFormat(args.tickFormat||function(x){return x});if(this.tickValues)axis.tickValues(this.tickValues);this.ticks=this.staticTicks||Math.floor(this.graph.width/this.pixelsPerTick);var berth=Math.floor(this.width*berthRate/2)||0;var transform;if(this.orientation=="top"){var yOffset=this.height||this.graph.height;transform="translate("+berth+","+yOffset+")"}else{transform="translate("+berth+", 0)"}if(this.element){this.vis.selectAll("*").remove()}this.vis.append("svg:g").attr("class",["x_ticks_d3",this.ticksTreatment].join(" ")).attr("transform",transform).call(axis.ticks(this.ticks).tickSubdivide(0).tickSize(this.tickSize));var gridSize=(this.orientation=="bottom"?1:-1)*this.graph.height;this.graph.vis.append("svg:g").attr("class","x_grid_d3").call(axis.ticks(this.ticks).tickSubdivide(0).tickSize(gridSize)).selectAll("text").each(function(){this.parentNode.setAttribute("data-x-value",this.textContent)});this._renderHeight=this.graph.height};this._discoverSize=function(element,args){if(typeof window!=="undefined"){var style=window.getComputedStyle(element,null);var elementHeight=parseInt(style.getPropertyValue("height"),10);if(!args.auto){var 
elementWidth=parseInt(style.getPropertyValue("width"),10)}}this.width=(args.width||elementWidth||this.graph.width)*(1+berthRate);this.height=args.height||elementHeight||40};this.initialize(args)};Rickshaw.namespace("Rickshaw.Graph.Axis.Y");Rickshaw.Graph.Axis.Y=Rickshaw.Class.create({initialize:function(args){this.graph=args.graph;this.orientation=args.orientation||"right";this.pixelsPerTick=args.pixelsPerTick||75;if(args.ticks)this.staticTicks=args.ticks;if(args.tickValues)this.tickValues=args.tickValues;this.tickSize=args.tickSize||4;this.ticksTreatment=args.ticksTreatment||"plain";this.tickFormat=args.tickFormat||function(y){return y};this.berthRate=.1;if(args.element){this.element=args.element;this.vis=d3.select(args.element).append("svg:svg").attr("class","rickshaw_graph y_axis");this.element=this.vis[0][0];this.element.style.position="relative";this.setSize({width:args.width,height:args.height})}else{this.vis=this.graph.vis}var self=this;this.graph.onUpdate(function(){self.render()})},setSize:function(args){args=args||{};if(!this.element)return;if(typeof window!=="undefined"){var style=window.getComputedStyle(this.element.parentNode,null);var elementWidth=parseInt(style.getPropertyValue("width"),10);if(!args.auto){var elementHeight=parseInt(style.getPropertyValue("height"),10)}}this.width=args.width||elementWidth||this.graph.width*this.berthRate;this.height=args.height||elementHeight||this.graph.height;this.vis.attr("width",this.width).attr("height",this.height*(1+this.berthRate));var berth=this.height*this.berthRate;if(this.orientation=="left"){this.element.style.top=-1*berth+"px"}},render:function(){if(this._renderHeight!==undefined&&this.graph.height!==this._renderHeight)this.setSize({auto:true});this.ticks=this.staticTicks||Math.floor(this.graph.height/this.pixelsPerTick);var axis=this._drawAxis(this.graph.y);this._drawGrid(axis);this._renderHeight=this.graph.height},_drawAxis:function(scale){var 
axis=d3.svg.axis().scale(scale).orient(this.orientation);axis.tickFormat(this.tickFormat);if(this.tickValues)axis.tickValues(this.tickValues);if(this.orientation=="left"){var berth=this.height*this.berthRate;var transform="translate("+this.width+", "+berth+")"}if(this.element){this.vis.selectAll("*").remove()}this.vis.append("svg:g").attr("class",["y_ticks",this.ticksTreatment].join(" ")).attr("transform",transform).call(axis.ticks(this.ticks).tickSubdivide(0).tickSize(this.tickSize));return axis},_drawGrid:function(axis){var gridSize=(this.orientation=="right"?1:-1)*this.graph.width;this.graph.vis.append("svg:g").attr("class","y_grid").call(axis.ticks(this.ticks).tickSubdivide(0).tickSize(gridSize)).selectAll("text").each(function(){this.parentNode.setAttribute("data-y-value",this.textContent) -})}});Rickshaw.namespace("Rickshaw.Graph.Axis.Y.Scaled");Rickshaw.Graph.Axis.Y.Scaled=Rickshaw.Class.create(Rickshaw.Graph.Axis.Y,{initialize:function($super,args){if(typeof args.scale==="undefined"){throw new Error("Scaled requires scale")}this.scale=args.scale;if(typeof args.grid==="undefined"){this.grid=true}else{this.grid=args.grid}$super(args)},_drawAxis:function($super,scale){var domain=this.scale.domain();var renderDomain=this.graph.renderer.domain().y;var extents=[Math.min.apply(Math,domain),Math.max.apply(Math,domain)];var extentMap=d3.scale.linear().domain([0,1]).range(extents);var adjExtents=[extentMap(renderDomain[0]),extentMap(renderDomain[1])];var adjustment=d3.scale.linear().domain(extents).range(adjExtents);var adjustedScale=this.scale.copy().domain(domain.map(adjustment)).range(scale.range());return $super(adjustedScale)},_drawGrid:function($super,axis){if(this.grid){$super(axis)}}});Rickshaw.namespace("Rickshaw.Graph.Behavior.Series.Highlight");Rickshaw.Graph.Behavior.Series.Highlight=function(args){this.graph=args.graph;this.legend=args.legend;var self=this;var colorSafe={};var activeLine=null;var 
disabledColor=args.disabledColor||function(seriesColor){return d3.interpolateRgb(seriesColor,d3.rgb("#d8d8d8"))(.8).toString()};this.addHighlightEvents=function(l){l.element.addEventListener("mouseover",function(e){if(activeLine)return;else activeLine=l;self.legend.lines.forEach(function(line){if(l===line){if(self.graph.renderer.unstack&&(line.series.renderer?line.series.renderer.unstack:true)){var seriesIndex=self.graph.series.indexOf(line.series);line.originalIndex=seriesIndex;var series=self.graph.series.splice(seriesIndex,1)[0];self.graph.series.push(series)}return}colorSafe[line.series.name]=colorSafe[line.series.name]||line.series.color;line.series.color=disabledColor(line.series.color)});self.graph.update()},false);l.element.addEventListener("mouseout",function(e){if(!activeLine)return;else activeLine=null;self.legend.lines.forEach(function(line){if(l===line&&line.hasOwnProperty("originalIndex")){var series=self.graph.series.pop();self.graph.series.splice(line.originalIndex,0,series);delete line.originalIndex}if(colorSafe[line.series.name]){line.series.color=colorSafe[line.series.name]}});self.graph.update()},false)};if(this.legend){this.legend.lines.forEach(function(l){self.addHighlightEvents(l)})}};Rickshaw.namespace("Rickshaw.Graph.Behavior.Series.Order");Rickshaw.Graph.Behavior.Series.Order=function(args){this.graph=args.graph;this.legend=args.legend;var self=this;if(typeof window.jQuery=="undefined"){throw"couldn't find jQuery at window.jQuery"}if(typeof window.jQuery.ui=="undefined"){throw"couldn't find jQuery UI at window.jQuery.ui"}jQuery(function(){jQuery(self.legend.list).sortable({containment:"parent",tolerance:"pointer",update:function(event,ui){var series=[];jQuery(self.legend.list).find("li").each(function(index,item){if(!item.series)return;series.push(item.series)});for(var 
i=self.graph.series.length-1;i>=0;i--){self.graph.series[i]=series.shift()}self.graph.update()}});jQuery(self.legend.list).disableSelection()});this.graph.onUpdate(function(){var h=window.getComputedStyle(self.legend.element).height;self.legend.element.style.height=h})};Rickshaw.namespace("Rickshaw.Graph.Behavior.Series.Toggle");Rickshaw.Graph.Behavior.Series.Toggle=function(args){this.graph=args.graph;this.legend=args.legend;var self=this;this.addAnchor=function(line){var anchor=document.createElement("a");anchor.innerHTML="✔";anchor.classList.add("action");line.element.insertBefore(anchor,line.element.firstChild);anchor.onclick=function(e){if(line.series.disabled){line.series.enable();line.element.classList.remove("disabled")}else{if(this.graph.series.filter(function(s){return!s.disabled}).length<=1)return;line.series.disable();line.element.classList.add("disabled")}self.graph.update()}.bind(this);var label=line.element.getElementsByTagName("span")[0];label.onclick=function(e){var disableAllOtherLines=line.series.disabled;if(!disableAllOtherLines){for(var i=0;idomainX){dataIndex=Math.abs(domainX-data[i].x)0){alignables.forEach(function(el){el.classList.remove("left");el.classList.add("right")});var rightAlignError=this._calcLayoutError(alignables);if(rightAlignError>leftAlignError){alignables.forEach(function(el){el.classList.remove("right");el.classList.add("left")})}}if(typeof this.onRender=="function"){this.onRender(args)}},_calcLayoutError:function(alignables){var parentRect=this.element.parentNode.getBoundingClientRect();var error=0;var alignRight=alignables.forEach(function(el){var rect=el.getBoundingClientRect();if(!rect.width){return}if(rect.right>parentRect.right){error+=rect.right-parentRect.right}if(rect.left=self.previewWidth){frameAfterDrag[0]-=frameAfterDrag[1]-self.previewWidth;frameAfterDrag[1]=self.previewWidth}}self.graphs.forEach(function(graph){var 
domainScale=d3.scale.linear().interpolate(d3.interpolateNumber).domain([0,self.previewWidth]).range(graph.dataDomain());var windowAfterDrag=[domainScale(frameAfterDrag[0]),domainScale(frameAfterDrag[1])];self.slideCallbacks.forEach(function(callback){callback(graph,windowAfterDrag[0],windowAfterDrag[1])});if(frameAfterDrag[0]===0){windowAfterDrag[0]=undefined}if(frameAfterDrag[1]===self.previewWidth){windowAfterDrag[1]=undefined}graph.window.xMin=windowAfterDrag[0];graph.window.xMax=windowAfterDrag[1];graph.update()})}function onMousedown(){drag.target=d3.event.target;drag.start=self._getClientXFromEvent(d3.event,drag);self.frameBeforeDrag=self.currentFrame.slice();d3.event.preventDefault?d3.event.preventDefault():d3.event.returnValue=false;d3.select(document).on("mousemove.rickshaw_range_slider_preview",onMousemove);d3.select(document).on("mouseup.rickshaw_range_slider_preview",onMouseup);d3.select(document).on("touchmove.rickshaw_range_slider_preview",onMousemove);d3.select(document).on("touchend.rickshaw_range_slider_preview",onMouseup);d3.select(document).on("touchcancel.rickshaw_range_slider_preview",onMouseup)}function onMousedownLeftHandle(datum,index){drag.left=true;onMousedown()}function onMousedownRightHandle(datum,index){drag.right=true;onMousedown()}function onMousedownMiddleHandle(datum,index){drag.left=true;drag.right=true;drag.rigid=true;onMousedown()}function onMouseup(datum,index){d3.select(document).on("mousemove.rickshaw_range_slider_preview",null);d3.select(document).on("mouseup.rickshaw_range_slider_preview",null);d3.select(document).on("touchmove.rickshaw_range_slider_preview",null);d3.select(document).on("touchend.rickshaw_range_slider_preview",null);d3.select(document).on("touchcancel.rickshaw_range_slider_preview",null);delete 
self.frameBeforeDrag;drag.left=false;drag.right=false;drag.rigid=false}element.select("rect.left_handle").on("mousedown",onMousedownLeftHandle);element.select("rect.right_handle").on("mousedown",onMousedownRightHandle);element.select("rect.middle_handle").on("mousedown",onMousedownMiddleHandle);element.select("rect.left_handle").on("touchstart",onMousedownLeftHandle);element.select("rect.right_handle").on("touchstart",onMousedownRightHandle);element.select("rect.middle_handle").on("touchstart",onMousedownMiddleHandle)},_getClientXFromEvent:function(event,drag){switch(event.type){case"touchstart":case"touchmove":var touchList=event.changedTouches;var touch=null;for(var touchIndex=0;touchIndexyMax)yMax=y});if(!series.length)return;if(series[0].xxMax)xMax=series[series.length-1].x});xMin-=(xMax-xMin)*this.padding.left;xMax+=(xMax-xMin)*this.padding.right;yMin=this.graph.min==="auto"?yMin:this.graph.min||0;yMax=this.graph.max===undefined?yMax:this.graph.max;if(this.graph.min==="auto"||yMin<0){yMin-=(yMax-yMin)*this.padding.bottom}if(this.graph.max===undefined){yMax+=(yMax-yMin)*this.padding.top}return{x:[xMin,xMax],y:[yMin,yMax]}},render:function(args){args=args||{};var graph=this.graph;var series=args.series||graph.series;var vis=args.vis||graph.vis;vis.selectAll("*").remove();var data=series.filter(function(s){return!s.disabled}).map(function(s){return s.stack});var pathNodes=vis.selectAll("path.path").data(data).enter().append("svg:path").classed("path",true).attr("d",this.seriesPathFactory());if(this.stroke){var strokeNodes=vis.selectAll("path.stroke").data(data).enter().append("svg:path").classed("stroke",true).attr("d",this.seriesStrokeFactory())}var i=0;series.forEach(function(series){if(series.disabled)return;series.path=pathNodes[0][i];if(this.stroke)series.stroke=strokeNodes[0][i];this._styleSeries(series);i++},this)},_styleSeries:function(series){var fill=this.fill?series.color:"none";var 
stroke=this.stroke?series.color:"none";series.path.setAttribute("fill",fill);series.path.setAttribute("stroke",stroke);series.path.setAttribute("stroke-width",this.strokeWidth);if(series.className){d3.select(series.path).classed(series.className,true)}if(series.className&&this.stroke){d3.select(series.stroke).classed(series.className,true)}},configure:function(args){args=args||{};Rickshaw.keys(this.defaults()).forEach(function(key){if(!args.hasOwnProperty(key)){this[key]=this[key]||this.graph[key]||this.defaults()[key];return}if(typeof this.defaults()[key]=="object"){Rickshaw.keys(this.defaults()[key]).forEach(function(k){this[key][k]=args[key][k]!==undefined?args[key][k]:this[key][k]!==undefined?this[key][k]:this.defaults()[key][k]},this)}else{this[key]=args[key]!==undefined?args[key]:this[key]!==undefined?this[key]:this.graph[key]!==undefined?this.graph[key]:this.defaults()[key]}},this)},setStrokeWidth:function(strokeWidth){if(strokeWidth!==undefined){this.strokeWidth=strokeWidth}},setTension:function(tension){if(tension!==undefined){this.tension=tension}}});Rickshaw.namespace("Rickshaw.Graph.Renderer.Line");Rickshaw.Graph.Renderer.Line=Rickshaw.Class.create(Rickshaw.Graph.Renderer,{name:"line",defaults:function($super){return Rickshaw.extend($super(),{unstack:true,fill:false,stroke:true})},seriesPathFactory:function(){var graph=this.graph;var factory=d3.svg.line().x(function(d){return graph.x(d.x)}).y(function(d){return graph.y(d.y)}).interpolate(this.graph.interpolation).tension(this.tension);factory.defined&&factory.defined(function(d){return d.y!==null});return factory}});Rickshaw.namespace("Rickshaw.Graph.Renderer.Stack");Rickshaw.Graph.Renderer.Stack=Rickshaw.Class.create(Rickshaw.Graph.Renderer,{name:"stack",defaults:function($super){return Rickshaw.extend($super(),{fill:true,stroke:false,unstack:false})},seriesPathFactory:function(){var graph=this.graph;var factory=d3.svg.area().x(function(d){return graph.x(d.x)}).y0(function(d){return 
graph.y(d.y0)}).y1(function(d){return graph.y(d.y+d.y0)}).interpolate(this.graph.interpolation).tension(this.tension);factory.defined&&factory.defined(function(d){return d.y!==null});return factory}});Rickshaw.namespace("Rickshaw.Graph.Renderer.Bar");Rickshaw.Graph.Renderer.Bar=Rickshaw.Class.create(Rickshaw.Graph.Renderer,{name:"bar",defaults:function($super){var defaults=Rickshaw.extend($super(),{gapSize:.05,unstack:false});delete defaults.tension;return defaults},initialize:function($super,args){args=args||{};this.gapSize=args.gapSize||this.gapSize;$super(args)},domain:function($super){var domain=$super();var frequentInterval=this._frequentInterval(this.graph.stackedData.slice(-1).shift());domain.x[1]+=Number(frequentInterval.magnitude);return domain},barWidth:function(series){var frequentInterval=this._frequentInterval(series.stack);var barWidth=this.graph.x.magnitude(frequentInterval.magnitude)*(1-this.gapSize);return barWidth},render:function(args){args=args||{};var graph=this.graph;var series=args.series||graph.series;var vis=args.vis||graph.vis;vis.selectAll("*").remove();var barWidth=this.barWidth(series.active()[0]);var barXOffset=0;var activeSeriesCount=series.filter(function(s){return!s.disabled}).length;var seriesBarWidth=this.unstack?barWidth/activeSeriesCount:barWidth;var transform=function(d){var matrix=[1,0,0,d.y<0?-1:1,0,d.y<0?graph.y.magnitude(Math.abs(d.y))*2:0];return"matrix("+matrix.join(",")+")"};series.forEach(function(series){if(series.disabled)return;var barWidth=this.barWidth(series);var nodes=vis.selectAll("path").data(series.stack.filter(function(d){return d.y!==null})).enter().append("svg:rect").attr("x",function(d){return graph.x(d.x)+barXOffset}).attr("y",function(d){return graph.y(d.y0+Math.abs(d.y))*(d.y<0?-1:1)}).attr("width",seriesBarWidth).attr("height",function(d){return 
graph.y.magnitude(Math.abs(d.y))}).attr("transform",transform);Array.prototype.forEach.call(nodes[0],function(n){n.setAttribute("fill",series.color)});if(this.unstack)barXOffset+=seriesBarWidth},this)},_frequentInterval:function(data){var intervalCounts={};for(var i=0;i0){this[0].data.forEach(function(plot){item.data.push({x:plot.x,y:0})})}else if(item.data.length===0){item.data.push({x:this.timeBase-(this.timeInterval||0),y:0})}this.push(item);if(this.legend){this.legend.addLine(this.itemByName(item.name))}},addData:function(data,x){var index=this.getIndex();Rickshaw.keys(data).forEach(function(name){if(!this.itemByName(name)){this.addItem({name:name})}},this);this.forEach(function(item){item.data.push({x:x||(index*this.timeInterval||1)+this.timeBase,y:data[item.name]||0})},this)},getIndex:function(){return this[0]&&this[0].data&&this[0].data.length?this[0].data.length:0},itemByName:function(name){for(var i=0;i1;i--){this.currentSize+=1;this.currentIndex+=1;this.forEach(function(item){item.data.unshift({x:((i-1)*this.timeInterval||1)+this.timeBase,y:0,i:i})},this)}}},addData:function($super,data,x){$super(data,x);this.currentSize+=1;this.currentIndex+=1;if(this.maxDataPoints!==undefined){while(this.currentSize>this.maxDataPoints){this.dropData()}}},dropData:function(){this.forEach(function(item){item.data.splice(0,1)});this.currentSize-=1},getIndex:function(){return this.currentIndex}});return Rickshaw}); \ No newline at end of file diff --git a/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js b/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js deleted file mode 100644 index 6704ca9cd1..0000000000 --- a/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js +++ /dev/null @@ -1 +0,0 @@ -(function(){function a(a){var b=a.source,d=a.target,e=c(b,d),f=[b];while(b!==e)b=b.parent,f.push(b);var g=f.length;while(d!==e)f.splice(g,0,d),d=d.parent;return f}function b(a){var b=[],c=a.parent;while(c!=null)b.push(a),a=c,c=c.parent;return b.push(a),b}function 
c(a,c){if(a===c)return a;var d=b(a),e=b(c),f=d.pop(),g=e.pop(),h=null;while(f===g)h=f,f=d.pop(),g=e.pop();return h}function g(a){a.fixed|=2}function h(a){a!==f&&(a.fixed&=1)}function i(){j(),f.fixed&=1,e=f=null}function j(){f.px+=d3.event.dx,f.py+=d3.event.dy,e.resume()}function k(a,b,c){var d=0,e=0;a.charge=0;if(!a.leaf){var f=a.nodes,g=f.length,h=-1,i;while(++hd&&(c=b,d=e);return c}function u(a){return a.reduce(v,0)}function v(a,b){return a+b[1]}function w(a,b){return x(a,Math.ceil(Math.log(b.length)/Math.LN2+1))}function x(a,b){var c=-1,d=+a[0],e=(a[1]-d)/b,f=[];while(++c<=b)f[c]=e*c+d;return f}function y(a){return[d3.min(a),d3.max(a)]}function z(a,b){return a.sort=d3.rebind(a,b.sort),a.children=d3.rebind(a,b.children),a.links=D,a.value=d3.rebind(a,b.value),a.nodes=function(b){return E=!0,(a.nodes=a)(b)},a}function A(a){return a.children}function B(a){return a.value}function C(a,b){return b.value-a.value}function D(a){return d3.merge(a.map(function(a){return(a.children||[]).map(function(b){return{source:a,target:b}})}))}function F(a,b){return a.value-b.value}function G(a,b){var c=a._pack_next;a._pack_next=b,b._pack_prev=a,b._pack_next=c,c._pack_prev=b}function H(a,b){a._pack_next=b,b._pack_prev=a}function I(a,b){var c=b.x-a.x,d=b.y-a.y,e=a.r+b.r;return e*e-c*c-d*d>.001}function J(a){function l(a){b=Math.min(a.x-a.r,b),c=Math.max(a.x+a.r,c),d=Math.min(a.y-a.r,d),e=Math.max(a.y+a.r,e)}var b=Infinity,c=-Infinity,d=Infinity,e=-Infinity,f=a.length,g,h,i,j,k;a.forEach(K),g=a[0],g.x=-g.r,g.y=0,l(g);if(f>1){h=a[1],h.x=h.r,h.y=0,l(h);if(f>2){i=a[2],O(g,h,i),l(i),G(g,i),g._pack_prev=i,G(i,h),h=g._pack_next;for(var m=3;m0?(H(g,j),h=j,m--):(H(j,h),g=j,m--)}}}var q=(b+c)/2,r=(d+e)/2,s=0;for(var m=0;m0&&(a=d)}return a}function X(a,b){return a.x-b.x}function Y(a,b){return b.x-a.x}function Z(a,b){return a.depth-b.depth}function $(a,b){function c(a,d){var e=a.children;if(e&&(i=e.length)){var 
f,g=null,h=-1,i;while(++h=0)f=d[e]._tree,f.prelim+=b,f.mod+=b,b+=f.shift+(c+=f.change)}function ba(a,b,c){a=a._tree,b=b._tree;var d=c/(b.number-a.number);a.change+=d,b.change-=d,b.shift+=c,b.prelim+=c,b.mod+=c}function bb(a,b,c){return a._tree.ancestor.parent==b.parent?a._tree.ancestor:c}function bc(a){return{x:a.x,y:a.y,dx:a.dx,dy:a.dy}}function bd(a,b){var c=a.x+b[3],d=a.y+b[0],e=a.dx-b[1]-b[3],f=a.dy-b[0]-b[2];return e<0&&(c+=e/2,e=0),f<0&&(d+=f/2,f=0),{x:c,y:d,dx:e,dy:f}}d3.layout={},d3.layout.bundle=function(){return function(b){var c=[],d=-1,e=b.length;while(++de&&(e=h),d.push(h)}for(g=0;g=i[0]&&o<=i[1]&&(k=g[d3.bisect(j,o,1,m)-1],k.y+=n,k.push(e[f]));return g}var a=!0,b=Number,c=y,d=w;return e.value=function(a){return arguments.length?(b=a,e):b},e.range=function(a){return arguments.length?(c=d3.functor(a),e):c},e.bins=function(a){return arguments.length?(d=typeof a=="number"?function(b){return x(b,a)}:d3.functor(a),e):d},e.frequency=function(b){return arguments.length?(a=!!b,e):a},e},d3.layout.hierarchy=function(){function e(f,h,i){var j=b.call(g,f,h),k=E?f:{data:f};k.depth=h,i.push(k);if(j&&(m=j.length)){var l=-1,m,n=k.children=[],o=0,p=h+1;while(++l0&&(ba(bb(g,a,d),a,m),i+=m,j+=m),k+=g._tree.mod,i+=e._tree.mod,l+=h._tree.mod,j+=f._tree.mod;g&&!V(f)&&(f._tree.thread=g,f._tree.mod+=k-j),e&&!U(h)&&(h._tree.thread=e,h._tree.mod+=i-l,d=a)}return d}var f=a.call(this,d,e),g=f[0];$(g,function(a,b){a._tree={ancestor:a,prelim:0,mod:0,change:0,shift:0,number:b?b._tree.number+1:0}}),h(g),i(g,-g._tree.prelim);var k=W(g,Y),l=W(g,X),m=W(g,Z),n=k.x-b(k,l)/2,o=l.x+b(l,k)/2,p=m.depth||1;return $(g,function(a){a.x=(a.x-n)/(o-n)*c[0],a.y=a.depth/p*c[1],delete a._tree}),f}var a=d3.layout.hierarchy().sort(null).value(null),b=T,c=[1,1];return d.separation=function(a){return arguments.length?(b=a,d):b},d.size=function(a){return arguments.length?(c=a,d):c},z(d,a)},d3.layout.treemap=function(){function i(a,b){var 
c=-1,d=a.length,e,f;while(++c0)d.push(g=f[o-1]),d.area+=g.area,(k=l(d,n))<=h?(f.pop(),h=k):(d.area-=d.pop().area,m(d,n,c,!1),n=Math.min(c.dx,c.dy),d.length=d.area=0,h=Infinity);d.length&&(m(d,n,c,!0),d.length=d.area=0),b.forEach(j)}}function k(a){var b=a.children;if(b&&b.length){var c=e(a),d=b.slice(),f,g=[];i(d,c.dx*c.dy/a.value),g.area=0;while(f=d.pop())g.push(f),g.area+=f.area,f.z!=null&&(m(g,f.z?c.dx:c.dy,c,!d.length),g.length=g.area=0);b.forEach(k)}}function l(a,b){var c=a.area,d,e=0,f=Infinity,g=-1,i=a.length;while(++ge&&(e=d)}return c*=c,b*=b,c?Math.max(b*e*h/c,c/(b*f*h)):Infinity}function m(a,c,d,e){var f=-1,g=a.length,h=d.x,i=d.y,j=c?b(a.area/c):0,k;if(c==d.dx){if(e||j>d.dy)j=j?d.dy:0;while(++fd.dx)j=j?d.dx:0;while(++fe;++e){var u=Do[e]+t;if(u in n)return u}}function c(){}function l(){}function s(n){function t(){for(var t,r=e,u=-1,i=r.length;++ue;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function T(n){return Lo(n,Io),n}function q(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t0&&(n=n.substring(0,a));var s=Zo.get(n);return s&&(n=s,l=j),a?t?u:r:t?c:i}function D(n,t){return function(e){var r=mo.event;mo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{mo.event=r}}}function j(n,t){var e=D(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function L(){var n=".dragsuppress-"+ ++Xo,t="touchmove"+n,e="selectstart"+n,r="dragstart"+n,u="click"+n,i=mo.select(_o).on(t,f).on(e,f).on(r,f),o=bo.style,a=o[Vo];return o[Vo]="none",function(t){function e(){i.on(u,null)}i.on(n,null),o[Vo]=a,t&&(i.on(u,function(){f(),e()},!0),setTimeout(e,0))}}function H(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var 
r=e.createSVGPoint();if(0>$o&&(_o.scrollX||_o.scrollY)){e=mo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();$o=!(u.f||u.e),e.remove()}return $o?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function F(n){return n>0?1:0>n?-1:0}function P(n){return n>1?0:-1>n?Bo:Math.acos(n)}function O(n){return n>1?Jo:-1>n?-Jo:Math.asin(n)}function R(n){return((n=Math.exp(n))-1/n)/2}function Y(n){return((n=Math.exp(n))+1/n)/2}function I(n){return((n=Math.exp(2*n))-1)/(n+1)}function U(n){return(n=Math.sin(n/2))*n}function Z(){}function V(n,t,e){return new X(n,t,e)}function X(n,t,e){this.h=n,this.s=t,this.l=e}function $(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,ot(u(n+120),u(n),u(n-120))}function B(n,t,e){return new W(n,t,e)}function W(n,t,e){this.h=n,this.c=t,this.l=e}function J(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),G(e,Math.cos(n*=Qo)*t,Math.sin(n)*t)}function G(n,t,e){return new K(n,t,e)}function K(n,t,e){this.l=n,this.a=t,this.b=e}function Q(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=tt(u)*sa,r=tt(r)*fa,i=tt(i)*ha,ot(rt(3.2404542*u-1.5371385*r-.4985314*i),rt(-.969266*u+1.8760108*r+.041556*i),rt(.0556434*u-.2040259*r+1.0572252*i))}function nt(n,t,e){return n>0?B(Math.atan2(e,t)*na,Math.sqrt(t*t+e*e),n):B(0/0,0/0,n)}function tt(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function et(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function rt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ut(n){return ot(n>>16,255&n>>8,255&n)}function 
it(n){return ut(n)+""}function ot(n,t,e){return new at(n,t,e)}function at(n,t,e){this.r=n,this.g=t,this.b=e}function ct(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function lt(n,t,e){var r,u,i,o=0,a=0,c=0;if(r=/([a-z]+)\((.*)\)/i.exec(n))switch(u=r[2].split(","),r[1]){case"hsl":return e(parseFloat(u[0]),parseFloat(u[1])/100,parseFloat(u[2])/100);case"rgb":return t(gt(u[0]),gt(u[1]),gt(u[2]))}return(i=da.get(n))?t(i.r,i.g,i.b):(null!=n&&"#"===n.charAt(0)&&(4===n.length?(o=n.charAt(1),o+=o,a=n.charAt(2),a+=a,c=n.charAt(3),c+=c):7===n.length&&(o=n.substring(1,3),a=n.substring(3,5),c=n.substring(5,7)),o=parseInt(o,16),a=parseInt(a,16),c=parseInt(c,16)),t(o,a,c))}function st(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),V(r,u,c)}function ft(n,t,e){n=ht(n),t=ht(t),e=ht(e);var r=et((.4124564*n+.3575761*t+.1804375*e)/sa),u=et((.2126729*n+.7151522*t+.072175*e)/fa),i=et((.0193339*n+.119192*t+.9503041*e)/ha);return G(116*u-16,500*(r-u),200*(u-i))}function ht(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function gt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function pt(n){return"function"==typeof n?n:function(){return n}}function dt(n){return n}function vt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),mt(t,e,n,r)}}function mt(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=mo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,l=null;return!_o.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in 
c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=mo.event;mo.event=n;try{o.progress.call(i,c)}finally{mo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(l=n,i):l},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Mo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var s in a)c.setRequestHeader(s,a[s]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=l&&(c.responseType=l),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},mo.rebind(i,o,"on"),null==r?i:i.get(yt(r))}function yt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Mt(){var n=bt(),t=_t()-n;t>24?(isFinite(t)&&(clearTimeout(Ma),Ma=setTimeout(Mt,t)),ya=0):(ya=1,ba(Mt))}function xt(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now()),xa.callback=n,xa.time=e+t}function bt(){var n=Date.now();for(xa=va;xa;)n>=xa.time&&(xa.flush=xa.callback(n-xa.time)),xa=xa.next;return n}function _t(){for(var n,t=va,e=1/0;t;)t.flush?t=n?n.next=t.next:va=t.next:(t.time8?function(n){return n/e}:function(n){return n*e},symbol:n}}function St(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Et(n){return n+""}function kt(){}function At(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function Nt(n,t){n&&Da.hasOwnProperty(n.type)&&Da[n.type](n,t)}function Tt(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++ua;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var 
c={point:e,points:n,other:null,visited:!1,entry:!0,subject:!0},l={point:e,points:[e],other:c,visited:!1,entry:!1,subject:!1};c.other=l,i.push(c),o.push(l),c={point:r,points:[r],other:null,visited:!1,entry:!1,subject:!0},l={point:r,points:[r],other:c,visited:!1,entry:!0,subject:!1},c.other=l,i.push(c),o.push(l)}}),o.sort(t),$t(i),$t(o),i.length){for(var a=0,c=e,l=o.length;l>a;++a)o[a].entry=c=!c;for(var s,f,h,g=i[0];;){for(s=g;s.visited;)if((s=s.next)===g)return;f=s.points,u.lineStart();do{if(s.visited=s.other.visited=!0,s.entry){if(s.subject)for(var a=0;a=0;)u.point((h=f[a])[0],h[1])}else r(s.point,s.prev.point,-1,u);s=s.prev}s=s.other,f=s.points}while(!s.visited);u.lineEnd()}}}function $t(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Wt))}}var g,p,d,v=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:l,polygonStart:function(){y.point=s,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=l,g=mo.merge(g);var n=Kt(m,p);g.length?Xt(g,Gt,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},M=Jt(),x=t(M);return y}}function Wt(n){return n.length>1}function Jt(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:c,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Gt(n,t){return((n=n.point)[0]<0?n[1]-Jo-Go:Jo-n[1])-((t=t.point)[0]<0?t[1]-Jo-Go:Jo-t[1])}function Kt(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;La.reset();for(var a=0,c=t.length;c>a;++a){var l=t[a],s=l.length;if(s)for(var f=l[0],h=f[0],g=f[1]/2+Bo/4,p=Math.sin(g),d=Math.cos(g),v=1;;){v===s&&(v=0),n=l[v];var 
m=n[0],y=n[1]/2+Bo/4,M=Math.sin(y),x=Math.cos(y),b=m-h,_=Math.abs(b)>Bo,w=p*M;if(La.add(Math.atan2(w*Math.sin(b),d*x+w*Math.cos(b))),i+=_?b+(b>=0?2:-2)*Bo:b,_^h>=e^m>=e){var S=jt(Ct(f),Ct(n));Ft(S);var E=jt(u,S);Ft(E);var k=(_^b>=0?-1:1)*O(E[2]);(r>k||r===k&&(S[0]||S[1]))&&(o+=_^b>=0?1:-1)}if(!v++)break;h=m,p=M,d=x,f=n}}return(-Go>i||Go>i&&0>La)^1&o}function Qt(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Bo:-Bo,c=Math.abs(i-e);Math.abs(c-Bo)0?Jo:-Jo),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Bo&&(Math.abs(e-u)Go?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function te(n,t,e,r){var u;if(null==n)u=e*Jo,r.point(-Bo,u),r.point(0,u),r.point(Bo,u),r.point(Bo,0),r.point(Bo,-u),r.point(0,-u),r.point(-Bo,-u),r.point(-Bo,0),r.point(-Bo,u);else if(Math.abs(n[0]-t[0])>Go){var i=(n[0]i}function e(n){var e,i,c,l,s;return{lineStart:function(){l=c=!1,s=1},point:function(f,h){var g,p=[f,h],d=t(f,h),v=o?d?0:u(f,h):d?u(f+(0>f?Bo:-Bo),h):0;if(!e&&(l=c=d)&&n.lineStart(),d!==c&&(g=r(e,p),(Ot(e,g)||Ot(p,g))&&(p[0]+=Go,p[1]+=Go,d=t(p[0],p[1]))),d!==c)s=0,d?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^d){var m;v&i||!(m=r(p,e,!0))||(s=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!d||e&&Ot(e,p)||n.point(p[0],p[1]),e=p,c=d,i=v},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return s|(l&&c)<<1}}}function r(n,t,e){var r=Ct(n),u=Ct(t),o=[1,0,0],a=jt(r,u),c=Dt(a,a),l=a[0],s=c-l*l;if(!s)return!e&&n;var f=i*c/s,h=-i*l/s,g=jt(o,a),p=Ht(o,f),d=Ht(a,h);Lt(p,d);var v=g,m=Dt(p,v),y=Dt(v,v),M=m*m-y*(Dt(p,p)-1);if(!(0>M)){var x=Math.sqrt(M),b=Ht(v,(-m-x)/y);if(Lt(b,p),b=Pt(b),!e)return b;var _,w=n[0],S=t[0],E=n[1],k=t[1];w>S&&(_=w,w=S,S=_);var 
A=S-w,N=Math.abs(A-Bo)A;if(!N&&E>k&&(_=E,E=k,k=_),T?N?E+k>0^b[1]<(Math.abs(b[0]-w)Bo^(w<=b[0]&&b[0]<=S)){var q=Ht(v,(-m+x)/y);return Lt(q,p),[b,Pt(q)]}}}function u(t,e){var r=o?n:Bo-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=Math.abs(i)>Go,c=Te(n,6*Qo);return Bt(t,e,c,o?[0,-n]:[-Bo,n-Bo])}function re(n,t,e,r){function u(r,u){return Math.abs(r[0]-n)0?0:3:Math.abs(r[0]-e)0?2:1:Math.abs(r[1]-t)0?1:0:u>0?3:2}function i(n,t){return o(n.point,t.point)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}function a(u,i){var o=i[0]-u[0],a=i[1]-u[1],c=[0,1];return Math.abs(o)0&&(u[0]+=c[0]*o,u[1]+=c[0]*a),!0):!1}return function(c){function l(n){for(var t=0,e=y.length,r=n[1],u=0;e>u;++u)for(var i,o=1,a=y[u],c=a.length,l=a[0];c>o;++o)i=a[o],l[1]<=r?i[1]>r&&s(l,i,n)>0&&++t:i[1]<=r&&s(l,i,n)<0&&--t,l=i;return 0!==t}function s(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(e[0]-n[0])*(t[1]-n[1])}function f(i,a,c,l){var s=0,f=0;if(null==i||(s=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do l.point(0===s||3===s?n:e,s>1?r:t);while((s=(s+c+4)%4)!==f)}else l.point(a[0],a[1])}function h(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function g(n,t){h(n,t)&&c.point(n,t)}function p(){q.point=v,y&&y.push(M=[]),k=!0,E=!1,w=S=0/0}function d(){m&&(v(x,b),_&&E&&T.rejoin(),m.push(T.buffer())),q.point=g,E&&c.lineEnd()}function v(n,t){n=Math.max(-Ja,Math.min(Ja,n)),t=Math.max(-Ja,Math.min(Ja,t));var e=h(n,t);if(y&&M.push([n,t]),k)x=n,b=t,_=e,k=!1,e&&(c.lineStart(),c.point(n,t));else if(e&&E)c.point(n,t);else{var r=[w,S],u=[n,t];a(r,u)?(E||(c.lineStart(),c.point(r[0],r[1])),c.point(u[0],u[1]),e||c.lineEnd(),A=!1):e&&(c.lineStart(),c.point(n,t),A=!1)}w=n,S=t,E=e}var m,y,M,x,b,_,w,S,E,k,A,N=c,T=Jt(),q={point:g,lineStart:p,lineEnd:d,polygonStart:function(){c=T,m=[],y=[],A=!0},polygonEnd:function(){c=N,m=mo.merge(m);var 
t=l([n,r]),e=A&&t,u=m.length;(e||u)&&(c.polygonStart(),e&&(c.lineStart(),f(null,null,1,c),c.lineEnd()),u&&Xt(m,i,t,f,c),c.polygonEnd()),m=y=M=null}};return q}}function ue(n,t,e){if(Math.abs(t)=n;var r=n/t;if(t>0){if(r>e[1])return!1;r>e[0]&&(e[0]=r)}else{if(rn&&(Qa=n),n>tc&&(tc=n),nc>t&&(nc=t),t>ec&&(ec=t)}function se(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=fe(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=fe(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function fe(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function he(n,t){Oa+=n,Ra+=t,++Ya}function ge(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);Ia+=o*(t+n)/2,Ua+=o*(e+r)/2,Za+=o,he(t=n,e=r)}var t,e;ic.point=function(r,u){ic.point=n,he(t=r,e=u)}}function pe(){ic.point=he}function de(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);Ia+=o*(r+n)/2,Ua+=o*(u+t)/2,Za+=o,o=u*n-r*t,Va+=o*(r+n),Xa+=o*(u+t),$a+=3*o,he(r=n,u=t)}var t,e,r,u;ic.point=function(i,o){ic.point=n,he(t=r=i,e=u=o)},ic.lineEnd=function(){n(t,e)}}function ve(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,Wo)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:c};return a}function me(n){function t(t){function r(e,r){e=n(e,r),t.point(e[0],e[1])}function u(){M=0/0,S.point=o,t.lineStart()}function o(r,u){var o=Ct([r,u]),a=n(r,u);e(M,x,y,b,_,w,M=a[0],x=a[1],y=r,b=o[0],_=o[1],w=o[2],i,t),t.point(M,x)}function a(){S.point=r,t.lineEnd()}function 
c(){u(),S.point=l,S.lineEnd=s}function l(n,t){o(f=n,h=t),g=M,p=x,d=b,v=_,m=w,S.point=o}function s(){e(M,x,y,b,_,w,g,p,f,d,v,m,i,t),S.lineEnd=a,a()}var f,h,g,p,d,v,m,y,M,x,b,_,w,S={point:r,lineStart:u,lineEnd:a,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=u}};return S}function e(t,i,o,a,c,l,s,f,h,g,p,d,v,m){var y=s-t,M=f-i,x=y*y+M*M;if(x>4*r&&v--){var b=a+g,_=c+p,w=l+d,S=Math.sqrt(b*b+_*_+w*w),E=Math.asin(w/=S),k=Math.abs(Math.abs(w)-1)r||Math.abs((y*q+M*z)/x-.5)>.3||u>a*g+c*p+l*d)&&(e(t,i,o,a,c,l,N,T,k,b/=S,_/=S,w,v,m),m.point(N,T),e(N,T,k,b,_,w,s,f,h,g,p,d,v,m))}}var r=.5,u=Math.cos(30*Qo),i=16;return t.precision=function(n){return arguments.length?(i=(r=n*n)>0&&16,t):Math.sqrt(r)},t}function ye(n){this.stream=n}function Me(n){var t=me(function(t,e){return n([t*na,e*na])});return function(n){var e=new ye(n=t(n));return e.point=function(t,e){n.point(t*Qo,e*Qo)},e}}function xe(n){return be(function(){return n})()}function be(n){function t(n){return n=a(n[0]*Qo,n[1]*Qo),[n[0]*h+c,l-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(l-n[1])/h),n&&[n[0]*na,n[1]*na]}function r(){a=ie(o=Ee(m,y,M),i);var n=i(d,v);return c=g-n[0]*h,l=p+n[1]*h,u()}function u(){return s&&(s.valid=!1,s=null),t}var i,o,a,c,l,s,f=me(function(n,t){return n=i(n,t),[n[0]*h+c,l-n[1]*h]}),h=150,g=480,p=250,d=0,v=0,m=0,y=0,M=0,x=Wa,b=dt,_=null,w=null;return t.stream=function(n){return s&&(s.valid=!1),s=_e(x(o,f(b(n)))),s.valid=!0,s},t.clipAngle=function(n){return arguments.length?(x=null==n?(_=n,Wa):ee((_=+n)*Qo),u()):_},t.clipExtent=function(n){return arguments.length?(w=n,b=n?re(n[0][0],n[0][1],n[1][0],n[1][1]):dt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return arguments.length?(d=n[0]%360*Qo,v=n[1]%360*Qo,r()):[d*na,v*na]},t.rotate=function(n){return 
arguments.length?(m=n[0]%360*Qo,y=n[1]%360*Qo,M=n.length>2?n[2]%360*Qo:0,r()):[m*na,y*na,M*na]},mo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function _e(n){var t=new ye(n);return t.point=function(t,e){n.point(t*Qo,e*Qo)},t}function we(n,t){return[n,t]}function Se(n,t){return[n>Bo?n-Wo:-Bo>n?n+Wo:n,t]}function Ee(n,t,e){return n?t||e?ie(Ae(n),Ne(t,e)):Ae(n):t||e?Ne(t,e):Se}function ke(n){return function(t,e){return t+=n,[t>Bo?t-Wo:-Bo>t?t+Wo:t,e]}}function Ae(n){var t=ke(n);return t.invert=ke(-n),t}function Ne(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,l=Math.sin(t),s=l*r+a*u;return[Math.atan2(c*i-s*o,a*r-l*u),O(s*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,l=Math.sin(t),s=l*i-c*o;return[Math.atan2(c*i+l*o,a*r+s*u),O(s*r-a*u)]},e}function Te(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=qe(e,u),i=qe(e,i),(o>0?i>u:u>i)&&(u+=o*Wo)):(u=n+o*Wo,i=n-.5*c);for(var l,s=u;o>0?s>i:i>s;s-=c)a.point((l=Pt([e,-r*Math.cos(s),-r*Math.sin(s)]))[0],l[1])}}function qe(n,t){var e=Ct(t);e[0]-=n,Ft(e);var r=P(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Go)%(2*Math.PI)}function ze(n,t,e){var r=mo.range(n,t-Go,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function Ce(n,t,e){var r=mo.range(n,t-Go,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function De(n){return n.source}function je(n){return n.target}function Le(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),l=u*Math.sin(n),s=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(U(r-t)+u*o*U(e-n))),g=1/Math.sin(h),p=h?function(n){var t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*s,u=e*l+t*f,o=e*i+t*a;return[Math.atan2(u,r)*na,Math.atan2(o,Math.sqrt(r*r+u*u))*na]}:function(){return[n*na,t*na]};return p.distance=h,p}function He(){function 
n(n,u){var i=Math.sin(u*=Qo),o=Math.cos(u),a=Math.abs((n*=Qo)-t),c=Math.cos(a);oc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;ac.point=function(u,i){t=u*Qo,e=Math.sin(i*=Qo),r=Math.cos(i),ac.point=n},ac.lineEnd=function(){ac.point=ac.lineEnd=c}}function Fe(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function Pe(n,t){function e(n,t){var e=Math.abs(Math.abs(t)-Jo)1&&u.push("H",r[0]),u.join("")}function We(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var l=2;l9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function fr(n){return n.length<3?Xe(n):n[0]+nr(n,sr(n))}function hr(n,t,e,r){var u,i,o,a,c,l,s;return u=r[n],i=u[0],o=u[1],u=r[t],a=u[0],c=u[1],u=r[e],l=u[0],s=u[1],(s-o)*(a-i)-(c-o)*(l-i)>0}function gr(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function pr(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],l=e[1],s=t[1]-c,f=r[1]-l,h=(a*(c-l)-f*(u-i))/(f*o-a*s);return[u+h*o,c+h*s]}function dr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function vr(n,t){var e={list:n.map(function(n,t){return{index:t,x:n[0],y:n[1]}}).sort(function(n,t){return n.yt.y?1:n.xt.x?1:0}),bottomSite:null},r={list:[],leftEnd:null,rightEnd:null,init:function(){r.leftEnd=r.createHalfEdge(null,"l"),r.rightEnd=r.createHalfEdge(null,"l"),r.leftEnd.r=r.rightEnd,r.rightEnd.l=r.leftEnd,r.list.unshift(r.leftEnd,r.rightEnd)},createHalfEdge:function(n,t){return{edge:n,side:t,vertex:null,l:null,r:null}},insert:function(n,t){t.l=n,t.r=n.r,n.r.l=t,n.r=t},leftBound:function(n){var t=r.leftEnd;do 
t=t.r;while(t!=r.rightEnd&&u.rightOf(t,n));return t=t.l},del:function(n){n.l.r=n.r,n.r.l=n.l,n.edge=null},right:function(n){return n.r},left:function(n){return n.l},leftRegion:function(n){return null==n.edge?e.bottomSite:n.edge.region[n.side]},rightRegion:function(n){return null==n.edge?e.bottomSite:n.edge.region[yc[n.side]]}},u={bisect:function(n,t){var e={region:{l:n,r:t},ep:{l:null,r:null}},r=t.x-n.x,u=t.y-n.y,i=r>0?r:-r,o=u>0?u:-u;return e.c=n.x*r+n.y*u+.5*(r*r+u*u),i>o?(e.a=1,e.b=u/r,e.c/=r):(e.b=1,e.a=r/u,e.c/=u),e},intersect:function(n,t){var e=n.edge,r=t.edge;if(!e||!r||e.region.r==r.region.r)return null;var u=e.a*r.b-e.b*r.a;if(Math.abs(u)<1e-10)return null;var i,o,a=(e.c*r.b-r.c*e.b)/u,c=(r.c*e.a-e.c*r.a)/u,l=e.region.r,s=r.region.r;l.y=o.region.r.x;return f&&"l"===i.side||!f&&"r"===i.side?null:{x:a,y:c}},rightOf:function(n,t){var e=n.edge,r=e.region.r,u=t.x>r.x;if(u&&"l"===n.side)return 1;if(!u&&"r"===n.side)return 0;if(1===e.a){var i=t.y-r.y,o=t.x-r.x,a=0,c=0;if(!u&&e.b<0||u&&e.b>=0?c=a=i>=e.b*o:(c=t.x+t.y*e.b>e.c,e.b<0&&(c=!c),c||(a=1)),!a){var l=r.x-e.region.l.x;c=e.b*(o*o-i*i)h*h+g*g}return"l"===n.side?c:!c},endPoint:function(n,e,r){n.ep[e]=r,n.ep[yc[e]]&&t(n)},distance:function(n,t){var e=n.x-t.x,r=n.y-t.y;return Math.sqrt(e*e+r*r)}},i={list:[],insert:function(n,t,e){n.vertex=t,n.ystar=t.y+e;for(var r=0,u=i.list,o=u.length;o>r;r++){var a=u[r];if(!(n.ystar>a.ystar||n.ystar==a.ystar&&t.x>a.vertex.x))break}u.splice(r,0,n)},del:function(n){for(var t=0,e=i.list,r=e.length;r>t&&e[t]!=n;++t);e.splice(t,1)},empty:function(){return 0===i.list.length},nextEvent:function(n){for(var t=0,e=i.list,r=e.length;r>t;++t)if(e[t]==n)return e[t+1];return null},min:function(){var n=i.list[0];return{x:n.vertex.x,y:n.ystar}},extractMin:function(){return i.list.shift()}};r.init(),e.bottomSite=e.list.shift();for(var 
o,a,c,l,s,f,h,g,p,d,v,m,y,M=e.list.shift();;)if(i.empty()||(o=i.min()),M&&(i.empty()||M.yg.y&&(p=h,h=g,g=p,y="r"),m=u.bisect(h,g),f=r.createHalfEdge(m,y),r.insert(l,f),u.endPoint(m,yc[y],v),d=u.intersect(l,f),d&&(i.del(l),i.insert(l,d,u.distance(d,h))),d=u.intersect(f,s),d&&i.insert(f,d,u.distance(d,h))}for(a=r.right(r.leftEnd);a!=r.rightEnd;a=r.right(a))t(a.edge)}function mr(n){return n.x}function yr(n){return n.y}function Mr(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function xr(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&xr(n,c[0],e,r,o,a),c[1]&&xr(n,c[1],o,r,u,a),c[2]&&xr(n,c[2],e,a,o,i),c[3]&&xr(n,c[3],o,a,u,i)}}function br(n,t){n=mo.rgb(n),t=mo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+ct(Math.round(e+i*n))+ct(Math.round(r+o*n))+ct(Math.round(u+a*n))}}function _r(n,t){var e,r={},u={};for(e in n)e in t?r[e]=Er(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function wr(n,t){return t-=n=+n,function(e){return n+t*e}}function Sr(n,t){var e,r,u,i,o,a=0,c=0,l=[],s=[];for(n+="",t+="",Mc.lastIndex=0,r=0;e=Mc.exec(t);++r)e.index&&l.push(t.substring(a,c=e.index)),s.push({i:l.length,x:e[0]}),l.push(null),a=Mc.lastIndex;for(ar;++r)if(o=s[r],o.x==e[0]){if(o.i)if(null==l[o.i+1])for(l[o.i-1]+=o.x,l.splice(o.i,1),u=r+1;i>u;++u)s[u].i--;else for(l[o.i-1]+=o.x+l[o.i+1],l.splice(o.i,2),u=r+1;i>u;++u)s[u].i-=2;else if(null==l[o.i+1])l[o.i]=o.x;else for(l[o.i]=o.x+l[o.i+1],l.splice(o.i+1,1),u=r+1;i>u;++u)s[u].i--;s.splice(r,1),i--,r--}else o.x=wr(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=s.pop(),null==l[o.i+1]?l[o.i]=o.x:(l[o.i]=o.x+l[o.i+1],l.splice(o.i+1,1)),i--;return 1===l.length?null==l[0]?(o=s[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)l[(o=s[r]).i]=o.x(n);return l.join("")}}function Er(n,t){for(var e,r=mo.interpolators.length;--r>=0&&!(e=mo.interpolators[r](n,t)););return e}function kr(n,t){var 
e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(Er(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function Ar(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function Nr(n){return function(t){return 1-n(1-t)}}function Tr(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function qr(n){return n*n}function zr(n){return n*n*n}function Cr(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function Dr(n){return function(t){return Math.pow(t,n)}}function jr(n){return 1-Math.cos(n*Jo)}function Lr(n){return Math.pow(2,10*(n-1))}function Hr(n){return 1-Math.sqrt(1-n*n)}function Fr(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/Wo*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*Wo/t)}}function Pr(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function Or(n){return 1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Rr(n,t){n=mo.hcl(n),t=mo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return J(e+i*n,r+o*n,u+a*n)+""}}function Yr(n,t){n=mo.hsl(n),t=mo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return $(e+i*n,r+o*n,u+a*n)+""}}function Ir(n,t){n=mo.lab(n),t=mo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return Q(e+i*n,r+o*n,u+a*n)+""}}function Ur(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Zr(n){var 
t=[n.a,n.b],e=[n.c,n.d],r=Xr(t),u=Vr(t,e),i=Xr($r(e,t,-u))||0;t[0]*e[1]180?s+=360:s-l>180&&(l+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:wr(l,s)})):s&&r.push(r.pop()+"rotate("+s+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:wr(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:wr(g[0],p[0])},{i:e-2,x:wr(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++ie;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function vu(n){return n.reduce(mu,0)}function mu(n,t){return n+t[1]}function yu(n,t){return Mu(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function Mu(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function xu(n){return[mo.min(n),mo.max(n)]}function bu(n,t){return n.parent==t.parent?1:2}function _u(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function wu(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function Su(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i0&&(n=r);return n}function Eu(n,t){return n.x-t.x}function ku(n,t){return t.x-n.x}function Au(n,t){return n.depth-t.depth}function Nu(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function qu(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function zu(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function Cu(n,t){return n.value-t.value}function Du(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function ju(n,t){n._pack_next=t,t._pack_prev=n}function Lu(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function Hu(n){function t(n){s=Math.min(n.x-n.r,s),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(l=e.length)){var 
e,r,u,i,o,a,c,l,s=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(Fu),r=e[0],r.x=-r.r,r.y=0,t(r),l>1&&(u=e[1],u.x=u.r,u.y=0,t(u),l>2))for(i=e[2],Ru(r,u,i),t(i),Du(r,i),r._pack_prev=i,Du(i,u),u=r._pack_next,o=3;l>o;o++){Ru(r,u,i=e[o]);var p=0,d=1,v=1;for(a=u._pack_next;a!==u;a=a._pack_next,d++)if(Lu(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!Lu(c,i);c=c._pack_prev,v++);p?(v>d||d==v&&u.ro;o++)i=e[o],i.x-=m,i.y-=y,M=Math.max(M,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=M,e.forEach(Pu)}}function Fu(n){n._pack_next=n._pack_prev=n}function Pu(n){delete n._pack_next,delete n._pack_prev}function Ou(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++iu&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function $u(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Bu(n){return n.rangeExtent?n.rangeExtent():$u(n.range())}function Wu(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Ju(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Gu(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:Tc}function Ku(n,t,e,r){var u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]2?Ku:Wu,c=r?Jr:Wr;return o=u(n,t,c,e),a=u(t,n,c,Er),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Ur)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return ri(n,t)},i.tickFormat=function(t,e){return ui(n,t,e)},i.nice=function(t){return ti(n,t),u()},i.copy=function(){return Qu(n,t,e,r)},u()}function ni(n,t){return mo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function ti(n,t){return 
Ju(n,Gu(ei(n,t)[2]))}function ei(n,t){null==t&&(t=10);var e=$u(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function ri(n,t){return mo.range.apply(mo,ei(n,t))}function ui(n,t,e){var r=-Math.floor(Math.log(ei(n,t)[2])/Math.LN10+.01);return mo.format(e?e.replace(Aa,function(n,t,e,u,i,o,a,c,l,s){return[t,e,u,i,o,a,c,l||"."+(r-2*("%"===s)),s].join("")}):",."+r+"f")}function ii(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Ju(r.map(u),e?Math:zc);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=$u(r),o=[],a=n[0],c=n[1],l=Math.floor(u(a)),s=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(s-l)){if(e){for(;s>l;l++)for(var h=1;f>h;h++)o.push(i(l)*h);o.push(i(l))}else for(o.push(i(l));l++0;h--)o.push(i(l)*h);for(l=0;o[l]c;s--);o=o.slice(l,s)}return o},o.tickFormat=function(n,t){if(!arguments.length)return qc;arguments.length<2?t=qc:"function"!=typeof t&&(t=mo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return ii(n.copy(),t,e,r)},ni(o,n)}function oi(n,t,e){function r(t){return n(u(t))}var u=ai(t),i=ai(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return ri(e,n)},r.tickFormat=function(n,t){return ui(e,n,t)},r.nice=function(n){return r.domain(ti(e,n))},r.exponent=function(o){return 
arguments.length?(u=ai(t=o),i=ai(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return oi(n.copy(),t,e)},ni(r,n)}function ai(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function ci(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return mo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++ae?[0/0,0/0]:[e>0?u[e-1]:n[0],et?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return si(n,t,e)},u()}function fi(n,t){function e(e){return e>=e?t[mo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return fi(n,t)},e}function hi(n){function t(n){return+n}return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return ri(n,t)},t.tickFormat=function(t,e){return ui(n,t,e)},t.copy=function(){return hi(n)},t}function gi(n){return n.innerRadius}function pi(n){return n.outerRadius}function di(n){return n.startAngle}function vi(n){return n.endAngle}function mi(n){for(var t,e,r,u=-1,i=n.length;++ue?l():(i.active=e,o.event&&o.event.start.call(n,s,t),o.tween.forEach(function(e,r){(r=r.call(n,s,t))&&p.push(r)}),c(r||1)?1:(xt(c,h,a),void 0))}function c(r){if(i.active!==e)return l();for(var u=r/g,a=f(u),c=p.length;c>0;)p[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,s,t),l()):void 0}function l(){return--i.count?delete i[e]:delete n.__transition__,1}var s=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=[];return r>=h?u(r-h):(xt(u,h,a),void 0)},0,a)}}function Ti(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function qi(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function zi(){this._=new 
Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Ci(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new Jc(e-1)),1),e}function i(n,e){return t(n=new Jc(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{Jc=zi;var r=new zi;return r._=n,o(r,t,e)}finally{Jc=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Di(n);return c.floor=c,c.round=Di(r),c.ceil=Di(u),c.offset=Di(i),c.range=a,n}function Di(n){return function(t,e){try{Jc=zi;var r=new zi;return r._=t,n(r,e)._}finally{Jc=Date}}}function ji(n){function t(t){for(var r,u,i,o=[],a=-1,c=0;++aa;){if(r>=l)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=ml[o in dl?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function Hi(n){return new RegExp("^(?:"+n.map(mo.requote).join("|")+")","i")}function Fi(n){for(var t=new u,e=-1,r=n.length;++en?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function Oi(n,t,e){cl.lastIndex=0;var r=cl.exec(t.substring(e));return r?(n.w=ll.get(r[0].toLowerCase()),e+r[0].length):-1}function Ri(n,t,e){ol.lastIndex=0;var r=ol.exec(t.substring(e));return r?(n.w=al.get(r[0].toLowerCase()),e+r[0].length):-1}function Yi(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ii(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Ui(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function Zi(n,t,e){hl.lastIndex=0;var r=hl.exec(t.substring(e));return r?(n.m=gl.get(r[0].toLowerCase()),e+r[0].length):-1}function Vi(n,t,e){sl.lastIndex=0;var r=sl.exec(t.substring(e));return r?(n.m=fl.get(r[0].toLowerCase()),e+r[0].length):-1}function Xi(n,t,e){return Li(n,vl.c.toString(),t,e)}function 
$i(n,t,e){return Li(n,vl.x.toString(),t,e)}function Bi(n,t,e){return Li(n,vl.X.toString(),t,e)}function Wi(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Ji(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+2));return r?(n.y=Ki(+r[0]),e+r[0].length):-1}function Gi(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Ki(n){return n+(n>68?1900:2e3)}function Qi(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function no(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function to(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function eo(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function ro(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function uo(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function io(n,t,e){yl.lastIndex=0;var r=yl.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function oo(n,t,e){var r=Ml.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}function ao(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(Math.abs(t)/60),u=Math.abs(t)%60;return e+Pi(r,"0",2)+Pi(u,"0",2)}function co(n,t,e){pl.lastIndex=0;var r=pl.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function lo(n){function t(n){try{Jc=zi;var t=new Jc;return t._=n,e(t)}finally{Jc=Date}}var e=ji(n);return t.parse=function(n){try{Jc=zi;var t=e.parse(n);return t&&t._}finally{Jc=Date}},t.toString=e.toString,t}function so(n){return n.toISOString()}function fo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=mo.bisect(bl,u);return i==bl.length?[t.year,ei(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/bl[i-1]1?{floor:function(t){for(;e(t=n.floor(t));)t=ho(t-1);return 
t},ceil:function(t){for(;e(t=n.ceil(t));)t=ho(+t+1);return t}}:n))},r.ticks=function(n,t){var e=$u(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return i&&(n=i[0],t=i[1]),n.range(e[0],ho(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return fo(n.copy(),t,e)},ni(r,n)}function ho(n){return new Date(n)}function go(n){return function(t){for(var e=n.length-1,r=n[e];!r[1](t);)r=n[--e];return r[0](t)}}function po(n){return JSON.parse(n.responseText)}function vo(n){var t=xo.createRange();return t.selectNode(xo.body),t.createContextualFragment(n.responseText)}var mo={version:"3.3.6"};Date.now||(Date.now=function(){return+new Date});var yo=[].slice,Mo=function(n){return yo.call(n)},xo=document,bo=xo.documentElement,_o=window;try{Mo(bo.childNodes)[0].nodeType}catch(wo){Mo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{xo.createElement("div").style.setProperty("opacity",0,"")}catch(So){var Eo=_o.Element.prototype,ko=Eo.setAttribute,Ao=Eo.setAttributeNS,No=_o.CSSStyleDeclaration.prototype,To=No.setProperty;Eo.setAttribute=function(n,t){ko.call(this,n,t+"")},Eo.setAttributeNS=function(n,t,e){Ao.call(this,n,t,e+"")},No.setProperty=function(n,t,e){To.call(this,n,t+"",e)}}mo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},mo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},mo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u=e);)e=void 0;for(;++ur&&(e=r)}else{for(;++u=e);)e=void 0;for(;++ur&&(e=r)}return e},mo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u=e);)e=void 0;for(;++ue&&(e=r)}else{for(;++u=e);)e=void 0;for(;++ue&&(e=r)}return e},mo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i=e);)e=u=void 0;for(;++ir&&(e=r),r>u&&(u=r))}else{for(;++i=e);)e=void 0;for(;++ir&&(e=r),r>u&&(u=r))}return[e,u]},mo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i1&&(t=t.map(e)),t=t.filter(n),t.length?mo.quantile(t.sort(mo.ascending),.5):void 0},mo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)r;){var i=r+u>>>1;er?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},mo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=mo.min(arguments,t),r=new Array(e);++nr)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var l,s,f,h,g=-1,p=a.length,d=o[c++],v=new u;++g=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(mo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},mo.set=function(n){var t=new i;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(i,{has:function(n){return zo+n in this},add:function(n){return this[zo+n]=!0,n},remove:function(n){return n=zo+n,n in this&&delete this[n]},values:function(){var n=[];return this.forEach(function(t){n.push(t)}),n},forEach:function(n){for(var t in this)t.charCodeAt(0)===Co&&n.call(this,t.substring(1))}}),mo.behavior={},mo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},mo.event=null,mo.requote=function(n){return n.replace(jo,"\\$&")};var jo=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,Lo={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},Ho=function(n,t){return t.querySelector(n)},Fo=function(n,t){return 
t.querySelectorAll(n)},Po=bo[a(bo,"matchesSelector")],Oo=function(n,t){return Po.call(n,t)};"function"==typeof Sizzle&&(Ho=function(n,t){return Sizzle(n,t)[0]||null},Fo=function(n,t){return Sizzle.uniqueSort(Sizzle(n,t))},Oo=Sizzle.matchesSelector),mo.selection=function(){return Uo};var Ro=mo.selection.prototype=[];Ro.select=function(n){var t,e,r,u,i=[];n=d(n);for(var o=-1,a=this.length;++o=0&&(e=n.substring(0,t),n=n.substring(t+1)),Yo.hasOwnProperty(e)?{space:Yo[e],local:n}:n}},Ro.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=mo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(m(t,n[t]));return this}return this.each(m(n,t))},Ro.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=n.trim().split(/^|\s+/g)).length,u=-1;if(t=e.classList){for(;++ur){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(_(e,n[e],t));return this}if(2>r)return _o.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(_(n,t,e))},Ro.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(w(t,n[t]));return this}return this.each(w(n,t))},Ro.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},Ro.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},Ro.append=function(n){return n=S(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},Ro.insert=function(n,t){return n=S(n),t=d(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments))})},Ro.remove=function(){return 
this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},Ro.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),d=new Array(a);if(t){var v,m=new u,y=new u,M=[];for(r=-1;++rr;++r)p[r]=E(e[r]);for(;a>r;++r)d[r]=n[r]}p.update=g,p.parentNode=g.parentNode=d.parentNode=n.parentNode,c.push(p),l.push(g),s.push(d)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++oi;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a)&&t.push(r)}return p(u)},Ro.order=function(){for(var n=-1,t=this.length;++n=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},Ro.sort=function(n){n=A.apply(this,arguments);for(var t=-1,e=this.length;++tn;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},Ro.size=function(){var n=0;return this.each(function(){++n}),n};var Io=[];mo.selection.enter=T,mo.selection.enter.prototype=Io,Io.append=Ro.append,Io.empty=Ro.empty,Io.node=Ro.node,Io.call=Ro.call,Io.size=Ro.size,Io.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++ar){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(C(n,t,e))};var Zo=mo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Zo.forEach(function(n){"on"+n in xo&&Zo.remove(n)});var Vo=a(bo.style,"userSelect"),Xo=0;mo.mouse=function(n){return H(n,h())};var $o=/WebKit/.test(_o.navigator.userAgent)?-1:0;mo.touches=function(n,t){return arguments.length<2&&(t=h().touches),t?Mo(t).map(function(t){var e=H(n,t);return e.identifier=t.identifier,e}):[]},mo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return mo.event.changedTouches[0].identifier}function e(n,t){return mo.touches(n).filter(function(n){return n.identifier===t})[0]}function 
r(n,t,e,r){return function(){function o(){var n=t(s,g),e=n[0]-d[0],r=n[1]-d[1];v|=e|r,d=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(v&&mo.event.target===h),f({type:"dragend"})}var c,l=this,s=l.parentNode,f=u.of(l,arguments),h=mo.event.target,g=n(),p=null==g?"drag":"drag-"+g,d=t(s,g),v=0,m=mo.select(_o).on(e+"."+p,o).on(r+"."+p,a),y=L();i?(c=i.apply(l,arguments),c=[c.x-d[0],c.y-d[1]]):c=[0,0],f({type:"dragstart"})}}var u=g(n,"drag","dragstart","dragend"),i=null,o=r(c,mo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},mo.rebind(n,u,"on")};var Bo=Math.PI,Wo=2*Bo,Jo=Bo/2,Go=1e-6,Ko=Go*Go,Qo=Bo/180,na=180/Bo,ta=Math.SQRT2,ea=2,ra=4;mo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=Y(d),o=i/(ea*h)*(e*I(ta*t+d)-R(d));return[r+o*l,u+o*s,i*e/Y(ta*t+d)]}return[r+n*l,u+n*s,i*Math.exp(ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],l=o-r,s=a-u,f=l*l+s*s,h=Math.sqrt(f),g=(c*c-i*i+ra*f)/(2*i*ea*h),p=(c*c-i*i-ra*f)/(2*c*ea*h),d=Math.log(Math.sqrt(g*g+1)-g),v=Math.log(Math.sqrt(p*p+1)-p),m=v-d,y=(m||Math.log(c/i))/ta;return e.duration=1e3*y,e},mo.behavior.zoom=function(){function n(n){n.on(A,l).on(oa+".zoom",h).on(N,p).on("dblclick.zoom",d).on(q,s)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(k[0],Math.min(k[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){b&&b.domain(x.range().map(function(n){return(n-S.x)/S.k}).map(x.invert)),w&&w.domain(_.range().map(function(n){return(n-S.y)/S.k}).map(_.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function l(){function n(){s=1,u(mo.mouse(r),h),a(i)}function e(){f.on(N,_o===r?p:null).on(T,null),g(s&&mo.event.target===l),c(i)}var 
r=this,i=C.of(r,arguments),l=mo.event.target,s=0,f=mo.select(_o).on(N,n).on(T,e),h=t(mo.mouse(r)),g=L();z.call(r),o(i)}function s(){function n(){var n=mo.touches(p);return g=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=mo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-M){var l=o[0],s=v[l.identifier];r(2*S.k),u(l,s),f(),a(d)}M=c}else if(o.length>1){var l=o[0],h=o[1],g=l[0]-h[0],p=l[1]-h[1];m=g*g+p*p}}function i(){for(var n,t,e,i,o=mo.touches(p),c=0,l=o.length;l>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var s=(s=e[0]-n[0])*s+(s=e[1]-n[1])*s,f=m&&Math.sqrt(s/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*g)}M=null,u(n,t),a(d)}function h(){if(mo.event.touches.length){for(var t=mo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}_.on(x,null).on(b,null),w.on(A,l).on(q,s),E(),c(d)}var g,p=this,d=C.of(p,arguments),v={},m=0,y=mo.event.changedTouches[0].identifier,x="touchmove.zoom-"+y,b="touchend.zoom-"+y,_=mo.select(_o).on(x,i).on(b,h),w=mo.select(p).on(A,null).on(q,e),E=L();z.call(p),e(),o(d)}function h(){var n=C.of(this,arguments);y?clearTimeout(y):(z.call(this),o(n)),y=setTimeout(function(){y=null,c(n)},50),f();var e=m||mo.mouse(this);v||(v=t(e)),r(Math.pow(2,.002*ua())*S.k),u(e,v),a(n)}function p(){v=null}function d(){var n=C.of(this,arguments),e=mo.mouse(this),i=t(e),l=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,mo.event.shiftKey?Math.ceil(l)-1:Math.floor(l)+1)),u(e,i),a(n),c(n)}var v,m,y,M,x,b,_,w,S={x:0,y:0,k:1},E=[960,500],k=ia,A="mousedown.zoom",N="mousemove.zoom",T="mouseup.zoom",q="touchstart.zoom",C=g(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=C.of(this,arguments),t=S;Oc?mo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=E[0],r=E[1],u=e/2,i=r/2,o=mo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(k=null==t?ia:[+t[0],+t[1]],n):k},n.center=function(t){return arguments.length?(m=t&&[+t[0],+t[1]],n):m},n.size=function(t){return arguments.length?(E=t&&[+t[0],+t[1]],n):E},n.x=function(t){return arguments.length?(b=t,x=t.copy(),S={x:0,y:0,k:1},n):b},n.y=function(t){return arguments.length?(w=t,_=t.copy(),S={x:0,y:0,k:1},n):w},mo.rebind(n,C,"on")};var ua,ia=[0,1/0],oa="onwheel"in xo?(ua=function(){return-mo.event.deltaY*(mo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in xo?(ua=function(){return mo.event.wheelDelta},"mousewheel"):(ua=function(){return-mo.event.detail},"MozMousePixelScroll");Z.prototype.toString=function(){return this.rgb()+""},mo.hsl=function(n,t,e){return 1===arguments.length?n instanceof X?V(n.h,n.s,n.l):lt(""+n,st,V):V(+n,+t,+e)};var aa=X.prototype=new Z;aa.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),V(this.h,this.s,this.l/n)},aa.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),V(this.h,this.s,n*this.l)},aa.rgb=function(){return $(this.h,this.s,this.l)},mo.hcl=function(n,t,e){return 1===arguments.length?n instanceof W?B(n.h,n.c,n.l):n instanceof K?nt(n.l,n.a,n.b):nt((n=ft((n=mo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):B(+n,+t,+e)};var ca=W.prototype=new Z;ca.brighter=function(n){return B(this.h,this.c,Math.min(100,this.l+la*(arguments.length?n:1)))},ca.darker=function(n){return B(this.h,this.c,Math.max(0,this.l-la*(arguments.length?n:1)))},ca.rgb=function(){return J(this.h,this.c,this.l).rgb()},mo.lab=function(n,t,e){return 
1===arguments.length?n instanceof K?G(n.l,n.a,n.b):n instanceof W?J(n.l,n.c,n.h):ft((n=mo.rgb(n)).r,n.g,n.b):G(+n,+t,+e)};var la=18,sa=.95047,fa=1,ha=1.08883,ga=K.prototype=new Z;ga.brighter=function(n){return G(Math.min(100,this.l+la*(arguments.length?n:1)),this.a,this.b)},ga.darker=function(n){return G(Math.max(0,this.l-la*(arguments.length?n:1)),this.a,this.b)},ga.rgb=function(){return Q(this.l,this.a,this.b)},mo.rgb=function(n,t,e){return 1===arguments.length?n instanceof at?ot(n.r,n.g,n.b):lt(""+n,ot,$):ot(~~n,~~t,~~e)};var pa=at.prototype=new Z;pa.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),ot(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):ot(u,u,u)},pa.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),ot(~~(n*this.r),~~(n*this.g),~~(n*this.b))},pa.hsl=function(){return st(this.r,this.g,this.b)},pa.toString=function(){return"#"+ct(this.r)+ct(this.g)+ct(this.b)};var 
da=mo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});da.forEach(function(n,t){da.set(n,ut(t))}),mo.functor=pt,mo.xhr=vt(dt),mo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=mo.xhr(n,t,i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o.row(e)}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function o(t){return t.map(a).join(n)}function a(n){return c.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var c=new RegExp('["'+n+"\n]"),l=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(s>=c)return o;if(u)return u=!1,i;var t=s;if(34===n.charCodeAt(t)){for(var e=t;e++s;){var r=n.charCodeAt(s++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(s)&&(++s,++a);else if(r!==l)continue;return n.substring(t,s-a)}return n.substring(t)}for(var r,u,i={},o={},a=[],c=n.length,s=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new i,u=[];return 
t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(a).join(n)].concat(t.map(function(t){return u.map(function(n){return a(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(o).join("\n")},e},mo.csv=mo.dsv(",","text/csv"),mo.tsv=mo.dsv(" ","text/tab-separated-values");var va,ma,ya,Ma,xa,ba=_o[a(_o,"requestAnimationFrame")]||function(n){setTimeout(n,17)};mo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={callback:n,time:u,next:null};ma?ma.next=i:va=i,ma=i,ya||(Ma=clearTimeout(Ma),ya=1,ba(Mt))},mo.timer.flush=function(){bt(),_t()};var _a=".",wa=",",Sa=[3,3],Ea="$",ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(wt);mo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=mo.round(n,St(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),ka[8+e/3]},mo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)},mo.format=function(n){var t=Aa.exec(n),e=t[1]||" ",r=t[2]||">",u=t[3]||"",i=t[4]||"",o=t[5],a=+t[6],c=t[7],l=t[8],s=t[9],f=1,h="",g=!1;switch(l&&(l=+l.substring(1)),(o||"0"===e&&"="===r)&&(o=e="0",r="=",c&&(a-=Math.floor((a-1)/4))),s){case"n":c=!0,s="g";break;case"%":f=100,h="%",s="f";break;case"p":f=100,h="%",s="r";break;case"b":case"o":case"x":case"X":"#"===i&&(i="0"+s.toLowerCase());case"c":case"d":g=!0,l=0;break;case"s":f=-1,s="r"}"#"===i?i="":"$"===i&&(i=Ea),"r"!=s||l||(s="g"),null!=l&&("g"==s?l=Math.max(1,Math.min(21,l)):("e"==s||"f"==s)&&(l=Math.max(0,Math.min(20,l)))),s=Na.get(s)||Et;var p=o&&c;return function(n){if(g&&n%1)return"";var t=0>n||0===n&&0>1/n?(n=-n,"-"):u;if(0>f){var d=mo.formatPrefix(n,l);n=d.scale(n),h=d.symbol}else n*=f;n=s(n,l);var v=n.lastIndexOf("."),m=0>v?n:n.substring(0,v),y=0>v?"":_a+n.substring(v+1);!o&&c&&(m=Ta(m));var M=i.length+m.length+y.length+(p?0:t.length),x=a>M?new Array(M=a-M+1).join(e):"";return 
p&&(m=Ta(x+m)),t+=i,n=m+y,("<"===r?t+n+x:">"===r?x+t+n:"^"===r?x.substring(0,M>>=1)+t+n+x.substring(M):t+(p?n:x+n))+h}};var Aa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,Na=mo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=mo.round(n,St(n,t))).toFixed(Math.max(0,Math.min(20,St(n*(1+1e-15),t))))}}),Ta=dt;if(Sa){var qa=Sa.length;Ta=function(n){for(var t=n.length,e=[],r=0,u=Sa[0];t>0&&u>0;)e.push(n.substring(t-=u,t+u)),u=Sa[r=(r+1)%qa];return e.reverse().join(wa)}}mo.geo={},kt.prototype={s:0,t:0,add:function(n){At(n,this.t,za),At(za.s,this.s,this),this.s?this.t+=za.t:this.s=za.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var za=new kt;mo.geo.stream=function(n,t){n&&Ca.hasOwnProperty(n.type)?Ca[n.type](n,t):Nt(n,t)};var Ca={Feature:function(n,t){Nt(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++rn?4*Bo+n:n,Ha.lineStart=Ha.lineEnd=Ha.point=c}};mo.geo.bounds=function(){function n(n,t){M.push(x=[s=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=Ct([t*Qo,e*Qo]);if(m){var u=jt(m,r),i=[u[1],-u[0],0],o=jt(i,u);Ft(o),o=Pt(o);var c=t-p,l=c>0?1:-1,d=o[0]*na*l,v=Math.abs(c)>180;if(v^(d>l*p&&l*t>d)){var y=o[1]*na;y>g&&(g=y)}else if(d=(d+360)%360-180,v^(d>l*p&&l*t>d)){var y=-o[1]*na;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);v?p>t?a(s,t)>a(s,h)&&(h=t):a(t,h)>a(s,h)&&(s=t):h>=s?(s>t&&(s=t),t>h&&(h=t)):t>p?a(s,t)>a(s,h)&&(h=t):a(t,h)>a(s,h)&&(s=t)}else n(t,e);m=r,p=t}function e(){b.point=t}function r(){x[0]=s,x[1]=h,b.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=Math.abs(r)>180?r+(r>0?360:-360):r}else d=n,v=e;Ha.point(n,e),t(n,e)}function i(){Ha.lineStart()}function 
o(){u(d,v),Ha.lineEnd(),Math.abs(y)>Go&&(s=-(h=180)),x[0]=s,x[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function l(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:nLa?(s=-(h=180),f=-(g=90)):y>Go?g=90:-Go>y&&(f=-90),x[0]=s,x[1]=h}};return function(n){g=h=-(s=f=1/0),M=[],mo.geo.stream(n,b);var t=M.length;if(t){M.sort(c);for(var e,r=1,u=M[0],i=[u];t>r;++r)e=M[r],l(e[0],u)||l(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,s=e[0],h=u[1])}return M=x=null,1/0===s||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[s,f],[h,g]]}}(),mo.geo.centroid=function(n){Fa=Pa=Oa=Ra=Ya=Ia=Ua=Za=Va=Xa=$a=0,mo.geo.stream(n,Ba);var t=Va,e=Xa,r=$a,u=t*t+e*e+r*r;return Ko>u&&(t=Ia,e=Ua,r=Za,Go>Pa&&(t=Oa,e=Ra,r=Ya),u=t*t+e*e+r*r,Ko>u)?[0/0,0/0]:[Math.atan2(e,t)*na,O(r/Math.sqrt(u))*na]};var Fa,Pa,Oa,Ra,Ya,Ia,Ua,Za,Va,Xa,$a,Ba={sphere:c,point:Rt,lineStart:It,lineEnd:Ut,polygonStart:function(){Ba.lineStart=Zt},polygonEnd:function(){Ba.lineStart=It}},Wa=Bt(Vt,Qt,te,[-Bo,-Bo/2]),Ja=1e9;mo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=re(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(mo.geo.conicEqualArea=function(){return oe(ae)}).raw=ae,mo.geo.albers=function(){return mo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},mo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=mo.geo.albers(),o=mo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=mo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var 
t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var l=i.scale(),s=+t[0],f=+t[1];return e=i.translate(t).clipExtent([[s-.455*l,f-.238*l],[s+.455*l,f+.238*l]]).stream(c).point,r=o.translate([s-.307*l,f+.201*l]).clipExtent([[s-.425*l+Go,f+.12*l+Go],[s-.214*l-Go,f+.234*l-Go]]).stream(c).point,u=a.translate([s-.205*l,f+.212*l]).clipExtent([[s-.214*l+Go,f+.166*l+Go],[s-.115*l-Go,f+.234*l-Go]]).stream(c).point,n},n.scale(1070)};var Ga,Ka,Qa,nc,tc,ec,rc={point:c,lineStart:c,lineEnd:c,polygonStart:function(){Ka=0,rc.lineStart=ce},polygonEnd:function(){rc.lineStart=rc.lineEnd=rc.point=c,Ga+=Math.abs(Ka/2)}},uc={point:le,lineStart:c,lineEnd:c,polygonStart:c,polygonEnd:c},ic={point:he,lineStart:ge,lineEnd:pe,polygonStart:function(){ic.lineStart=de},polygonEnd:function(){ic.point=he,ic.lineStart=ge,ic.lineEnd=pe}};mo.geo.transform=function(n){return{stream:function(t){var e=new ye(t);for(var r in n)e[r]=n[r];return 
e}}},ye.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},mo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),mo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Ga=0,mo.geo.stream(n,u(rc)),Ga},n.centroid=function(n){return Oa=Ra=Ya=Ia=Ua=Za=Va=Xa=$a=0,mo.geo.stream(n,u(ic)),$a?[Va/$a,Xa/$a]:Za?[Ia/Za,Ua/Za]:Ya?[Oa/Ya,Ra/Ya]:[0/0,0/0]},n.bounds=function(n){return tc=ec=-(Qa=nc=1/0),mo.geo.stream(n,u(uc)),[[Qa,nc],[tc,ec]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Me(n):dt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new se:new ve(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(mo.geo.albersUsa()).context(null)},mo.geo.projection=xe,mo.geo.projectionMutator=be,(mo.geo.equirectangular=function(){return xe(we)}).raw=we.invert=we,mo.geo.rotation=function(n){function t(t){return t=n(t[0]*Qo,t[1]*Qo),t[0]*=na,t[1]*=na,t}return n=Ee(n[0]%360*Qo,n[1]*Qo,n.length>2?n[2]*Qo:0),t.invert=function(t){return t=n.invert(t[0]*Qo,t[1]*Qo),t[0]*=na,t[1]*=na,t},t},Se.invert=we,mo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=Ee(-n[0]*Qo,-n[1]*Qo,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=na,n[1]*=na}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=Te((t=+r)*Qo,u*Qo),n):t},n.precision=function(r){return 
arguments.length?(e=Te(t*Qo,(u=+r)*Qo),n):u},n.angle(90)},mo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Qo,u=n[1]*Qo,i=t[1]*Qo,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),l=Math.cos(u),s=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=l*s-c*f*a)*e),c*s+l*f*a)},mo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return mo.range(Math.ceil(i/v)*v,u,v).map(h).concat(mo.range(Math.ceil(l/m)*m,c,m).map(g)).concat(mo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return Math.abs(n%v)>Go -}).map(s)).concat(mo.range(Math.ceil(a/d)*d,o,d).filter(function(n){return Math.abs(n%m)>Go}).map(f))}var e,r,u,i,o,a,c,l,s,f,h,g,p=10,d=p,v=90,m=360,y=2.5;return n.lines=function(){return t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(l).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],l=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),l>c&&(t=l,l=c,c=t),n.precision(y)):[[i,l],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(v=+t[0],m=+t[1],n):[v,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],d=+t[1],n):[p,d]},n.precision=function(t){return arguments.length?(y=+t,s=ze(a,o,90),f=Ce(r,e,y),h=ze(l,c,90),g=Ce(i,u,y),n):y},n.majorExtent([[-180,-90+Go],[180,90-Go]]).minorExtent([[-180,-80-Go],[180,80+Go]])},mo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=De,u=je;return n.distance=function(){return 
mo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},mo.geo.interpolate=function(n,t){return Le(n[0]*Qo,n[1]*Qo,t[0]*Qo,t[1]*Qo)},mo.geo.length=function(n){return oc=0,mo.geo.stream(n,ac),oc};var oc,ac={sphere:c,point:c,lineStart:He,lineEnd:c,polygonStart:c,polygonEnd:c},cc=Fe(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(mo.geo.azimuthalEqualArea=function(){return xe(cc)}).raw=cc;var lc=Fe(function(n){var t=Math.acos(n);return t&&t/Math.sin(t)},dt);(mo.geo.azimuthalEquidistant=function(){return xe(lc)}).raw=lc,(mo.geo.conicConformal=function(){return oe(Pe)}).raw=Pe,(mo.geo.conicEquidistant=function(){return oe(Oe)}).raw=Oe;var sc=Fe(function(n){return 1/n},Math.atan);(mo.geo.gnomonic=function(){return xe(sc)}).raw=sc,Re.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Jo]},(mo.geo.mercator=function(){return Ye(Re)}).raw=Re;var fc=Fe(function(){return 1},Math.asin);(mo.geo.orthographic=function(){return xe(fc)}).raw=fc;var hc=Fe(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(mo.geo.stereographic=function(){return xe(hc)}).raw=hc,Ie.invert=function(n,t){return[Math.atan2(R(n),Math.cos(t)),O(Math.sin(t)/Y(n))]},(mo.geo.transverseMercator=function(){return Ye(Ie)}).raw=Ie,mo.geom={},mo.svg={},mo.svg.line=function(){return Ue(dt)};var gc=mo.map({linear:Xe,"linear-closed":$e,step:Be,"step-before":We,"step-after":Je,basis:er,"basis-open":rr,"basis-closed":ur,bundle:ir,cardinal:Qe,"cardinal-open":Ge,"cardinal-closed":Ke,monotone:fr});gc.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var pc=[0,2/3,1/3,0],dc=[0,1/3,2/3,0],vc=[0,1/6,2/3,1/6];mo.geom.hull=function(n){function t(n){if(n.length<3)return[];var 
t,u,i,o,a,c,l,s,f,h,g,p,d=pt(e),v=pt(r),m=n.length,y=m-1,M=[],x=[],b=0;if(d===Ze&&r===Ve)t=n;else for(i=0,t=[];m>i;++i)t.push([+d.call(this,u=n[i],i),+v.call(this,u,i)]);for(i=1;m>i;++i)(t[i][1]i;++i)i!==b&&(c=t[i][1]-t[b][1],a=t[i][0]-t[b][0],M.push({angle:Math.atan2(c,a),index:i}));for(M.sort(function(n,t){return n.angle-t.angle}),g=M[0].angle,h=M[0].index,f=0,i=1;y>i;++i){if(o=M[i].index,g==M[i].angle){if(a=t[h][0]-t[b][0],c=t[h][1]-t[b][1],l=t[o][0]-t[b][0],s=t[o][1]-t[b][1],a*a+c*c>=l*l+s*s){M[i].index=-1;continue}M[f].index=-1}g=M[i].angle,f=i,h=o}for(x.push(b),i=0,o=0;2>i;++o)M[o].index>-1&&(x.push(M[o].index),i++);for(p=x.length;y>o;++o)if(!(M[o].index<0)){for(;!hr(x[p-2],x[p-1],M[o].index,t);)--p;x[p++]=M[o].index}var _=[];for(i=p-1;i>=0;--i)_.push(n[x[i]]);return _}var e=Ze,r=Ve;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},mo.geom.polygon=function(n){return Lo(n,mc),n};var mc=mo.geom.polygon.prototype=[];mc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++ta;a++)e.push([u,t[a],t[a+1]])}),e},mo.geom.voronoi=function(n){function t(n){var t,i,o,a=n.map(function(){return[]}),c=pt(e),l=pt(r),s=n.length,f=1e6;if(c===Ze&&l===Ve)t=n;else for(t=new Array(s),o=0;s>o;++o)t[o]=[+c.call(this,i=n[o],o),+l.call(this,i,o)];if(vr(t,function(n){var t,e,r,u,i,o;1===n.a&&n.b>=0?(t=n.ep.r,e=n.ep.l):(t=n.ep.l,e=n.ep.r),1===n.a?(i=t?t.y:-f,r=n.c-n.b*i,o=e?e.y:f,u=n.c-n.b*o):(r=t?t.x:-f,i=n.c-n.a*r,u=e?e.x:f,o=n.c-n.a*u);var c=[r,i],l=[u,o];a[n.region.l.index].push(c,l),a[n.region.r.index].push(c,l)}),a=a.map(function(n,e){var r=t[e][0],u=t[e][1],i=n.map(function(n){return Math.atan2(n[0]-r,n[1]-u)}),o=mo.range(n.length).sort(function(n,t){return i[n]-i[t]});return o.filter(function(n,t){return!t||i[n]-i[o[t-1]]>Go}).map(function(t){return n[t]})}),a.forEach(function(n,e){var r=n.length;if(!r)return n.push([-f,-f],[-f,f],[f,f],[f,-f]);if(!(r>2)){var 
u=t[e],i=n[0],o=n[1],a=u[0],c=u[1],l=i[0],s=i[1],h=o[0],g=o[1],p=Math.abs(h-l),d=g-s;if(Math.abs(d)c?-f:f;n.push([-f,v],[f,v])}else if(Go>p){var m=l>a?-f:f;n.push([m,-f],[m,f])}else{var v=(l-a)*(g-s)>(h-l)*(s-c)?f:-f,y=Math.abs(d)-p;Math.abs(y)d?v:-v,v]):(y>0&&(v*=-1),n.push([-f,v],[f,v]))}}}),u)for(o=0;s>o;++o)u.clip(a[o]);for(o=0;s>o;++o)a[o].point=n[o];return a}var e=Ze,r=Ve,u=null;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.clipExtent=function(n){if(!arguments.length)return u&&[u[0],u[2]];if(null==n)u=null;else{var e=+n[0][0],r=+n[0][1],i=+n[1][0],o=+n[1][1];u=mo.geom.polygon([[e,r],[e,o],[i,o],[i,r]])}return t},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):u&&u[2]},t.links=function(n){var t,u,i,o=n.map(function(){return[]}),a=[],c=pt(e),l=pt(r),s=n.length;if(c===Ze&&l===Ve)t=n;else for(t=new Array(s),i=0;s>i;++i)t[i]=[+c.call(this,u=n[i],i),+l.call(this,u,i)];return vr(t,function(t){var e=t.region.l.index,r=t.region.r.index;o[e][r]||(o[e][r]=o[r][e]=!0,a.push({source:n[e],target:n[r]}))}),a},t.triangles=function(n){if(e===Ze&&r===Ve)return mo.geom.delaunay(n);for(var t,u=new Array(c),i=pt(e),o=pt(r),a=-1,c=n.length;++a=l,h=r>=s,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=Mr()),f?u=l:a=l,h?o=s:c=s,i(n,t,e,r,u,o,a,c)}var s,f,h,g,p,d,v,m,y,M=pt(a),x=pt(c);if(null!=t)d=t,v=e,m=r,y=u;else if(m=y=-(d=v=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)s=n[g],s.xm&&(m=s.x),s.y>y&&(y=s.y),f.push(s.x),h.push(s.y);else for(g=0;p>g;++g){var b=+M(s=n[g],g),_=+x(s,g);d>b&&(d=b),v>_&&(v=_),b>m&&(m=b),_>y&&(y=_),f.push(b),h.push(_)}var w=m-d,S=y-v;w>S?y=v+w:m=d+S;var E=Mr();if(E.add=function(n){i(E,n,+M(n,++g),+x(n,g),d,v,m,y)},E.visit=function(n){xr(n,E,d,v,m,y)},g=-1,null==t){for(;++g=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return 
e=bc.get(e)||xc,r=_c.get(r)||dt,Ar(r(e.apply(null,Array.prototype.slice.call(arguments,1))))},mo.interpolateHcl=Rr,mo.interpolateHsl=Yr,mo.interpolateLab=Ir,mo.interpolateRound=Ur,mo.transform=function(n){var t=xo.createElementNS(mo.ns.prefix.svg,"g");return(mo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Zr(e?e.matrix:wc)})(n)},Zr.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var wc={a:1,b:0,c:0,d:1,e:0,f:0};mo.interpolateTransform=Br,mo.layout={},mo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e(u-e)*a){var c=t.charge*a*a;return n.px-=i*c,n.py-=o*c,!0}if(t.point&&isFinite(a)){var c=t.pointCharge*a*a;n.px-=i*c,n.py-=o*c}}return!t.charge}}function t(n){n.px=mo.event.x,n.py=mo.event.y,a.resume()}var e,r,u,i,o,a={},c=mo.dispatch("start","tick","end"),l=[1,1],s=.9,f=Sc,h=Ec,g=-30,p=.1,d=.8,v=[],m=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var t,e,a,f,h,d,y,M,x,b=v.length,_=m.length;for(e=0;_>e;++e)a=m[e],f=a.source,h=a.target,M=h.x-f.x,x=h.y-f.y,(d=M*M+x*x)&&(d=r*i[e]*((d=Math.sqrt(d))-u[e])/d,M*=d,x*=d,h.x-=M*(y=f.weight/(h.weight+f.weight)),h.y-=x*y,f.x+=M*(y=1-y),f.y+=x*y);if((y=r*p)&&(M=l[0]/2,x=l[1]/2,e=-1,y))for(;++e0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),mo.timer(a.tick)),a):r},a.start=function(){function n(n,r){for(var u,i=t(e),o=-1,a=i.length;++or;++r)c[r]=[];for(r=0;d>r;++r){var n=m[r];c[n.source.index].push(n.target),c[n.target.index].push(n.source)}}return c[e]}var e,r,c,s,p=v.length,d=m.length,y=l[0],M=l[1];for(e=0;p>e;++e)(s=v[e]).index=e,s.weight=0;for(e=0;d>e;++e)s=m[e],"number"==typeof s.source&&(s.source=v[s.source]),"number"==typeof 
s.target&&(s.target=v[s.target]),++s.source.weight,++s.target.weight;for(e=0;p>e;++e)s=v[e],isNaN(s.x)&&(s.x=n("x",y)),isNaN(s.y)&&(s.y=n("y",M)),isNaN(s.px)&&(s.px=s.x),isNaN(s.py)&&(s.py=s.y);if(u=[],"function"==typeof f)for(e=0;d>e;++e)u[e]=+f.call(this,m[e],e);else for(e=0;d>e;++e)u[e]=f;if(i=[],"function"==typeof h)for(e=0;d>e;++e)i[e]=+h.call(this,m[e],e);else for(e=0;d>e;++e)i[e]=h;if(o=[],"function"==typeof g)for(e=0;p>e;++e)o[e]=+g.call(this,v[e],e);else for(e=0;p>e;++e)o[e]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=mo.behavior.drag().origin(dt).on("dragstart.force",nu).on("drag.force",t).on("dragend.force",tu)),arguments.length?(this.on("mouseover.force",eu).on("mouseout.force",ru).call(e),void 0):e},mo.rebind(a,c,"on")};var Sc=20,Ec=1;mo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(l=c.length)){for(var l,s,f=-1,h=t.children=[],g=0,p=o+1;++fg;++g)for(u.call(n,l[0][g],p=d[g],s[0][g][1]),h=1;v>h;++h)u.call(n,l[h][g],p+=s[h-1][g][1],s[h][g][1]);return a}var t=dt,e=gu,r=pu,u=hu,i=su,o=fu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:Ac.get(t)||gu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:Nc.get(t)||pu,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var Ac=mo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(du),i=n.map(vu),o=mo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,l=[],s=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],l.push(e)):(c+=i[e],s.push(e));return s.reverse().concat(l)},reverse:function(n){return mo.range(n.length).reverse()},"default":gu}),Nc=mo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,l,s=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=l=0,e=1;h>e;++e){for(t=0,u=0;s>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];s>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,l>c&&(l=c)}for(e=0;h>e;++e)g[e]-=l;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:pu});mo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],l=n.map(e,this),s=r.call(this,l,i),f=u.call(this,s,l,i),i=-1,h=l.length,g=f.length-1,p=t?1:1/h;++i0)for(i=-1;++i=s[0]&&a<=s[1]&&(o=c[mo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=xu,u=yu;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=pt(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return Mu(n,t)}:pt(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},mo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,l,s=r[0],f=s,h=-1;++h0&&(qu(zu(a,n,r),n,u),l+=u,s+=u),f+=a._tree.mod,l+=i._tree.mod,h+=c._tree.mod,s+=o._tree.mod;a&&!wu(o)&&(o._tree.thread=a,o._tree.mod+=f-s),i&&!_u(c)&&(c._tree.thread=i,c._tree.mod+=l-h,r=n)}return r}var l=t.call(this,n,i),s=l[0];Nu(s,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(s),a(s,-s._tree.prelim);var f=Su(s,ku),h=Su(s,Eu),g=Su(s,Au),p=f.x-e(f,h)/2,d=h.x+e(h,f)/2,v=g.depth||1;return Nu(s,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(d-p)*r[0],n.y=n.depth/v*r[1],delete 
n._tree}),l}var t=mo.layout.hierarchy().sort(null).value(null),e=bu,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},iu(n,t)},mo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],l=u[1],s=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,Nu(a,function(n){n.r=+s(n.value)}),Nu(a,Hu),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/l))/2;Nu(a,function(n){n.r+=f}),Nu(a,Hu),Nu(a,function(n){n.r-=f})}return Ou(a,c/2,l/2,t?1:1/Math.max(2*a.r/c,2*a.r/l)),o}var t,e=mo.layout.hierarchy().sort(Cu),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},iu(n,e)},mo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],l=0;Nu(c,function(n){var t=n.children;t&&t.length?(n.x=Iu(t),n.y=Yu(t)):(n.x=o?l+=e(n,o):0,n.y=0,o=n)});var s=Uu(c),f=Zu(c),h=s.x-e(s,f)/2,g=f.x+e(f,s)/2;return Nu(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=mo.layout.hierarchy().sort(null).value(null),e=bu,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},iu(n,t)},mo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++ut?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var 
o,a,c,l=f(e),s=[],h=i.slice(),p=1/0,d="slice"===g?l.dx:"dice"===g?l.dy:"slice-dice"===g?1&e.depth?l.dy:l.dx:Math.min(l.dx,l.dy);for(n(h,l.dx*l.dy/e.value),s.area=0;(c=h.length)>0;)s.push(o=h[c-1]),s.area+=o.area,"squarify"!==g||(a=r(s,d))<=p?(h.pop(),p=a):(s.area-=s.pop().area,u(s,d,l,!1),d=Math.min(l.dx,l.dy),s.length=s.area=0,p=1/0);s.length&&(u(s,d,l,!0),s.length=s.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++oe&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,l=e.y,s=t?c(n.area/t):0;if(t==e.dx){for((r||s>e.dy)&&(s=e.dy);++ie.dx)&&(s=e.dx);++ie&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=mo.random.normal.apply(mo,arguments);return function(){return Math.exp(n())}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t/n}}},mo.scale={};var Tc={floor:dt,ceil:dt};mo.scale.linear=function(){return Qu([0,1],[0,1],Er,!1)},mo.scale.log=function(){return ii(mo.scale.linear().domain([0,1]),10,!0,[1,10])};var qc=mo.format(".0e"),zc={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};mo.scale.pow=function(){return oi(mo.scale.linear(),1,[0,1])},mo.scale.sqrt=function(){return mo.scale.pow().exponent(.5)},mo.scale.ordinal=function(){return ci([],{t:"range",a:[[]]})},mo.scale.category10=function(){return mo.scale.ordinal().range(Cc)},mo.scale.category20=function(){return mo.scale.ordinal().range(Dc)},mo.scale.category20b=function(){return mo.scale.ordinal().range(jc)},mo.scale.category20c=function(){return mo.scale.ordinal().range(Lc)};var 
Cc=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(it),Dc=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(it),jc=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(it),Lc=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(it);mo.scale.quantile=function(){return li([],[])},mo.scale.quantize=function(){return si(0,1,[0,1])},mo.scale.threshold=function(){return fi([.5],[0,1])},mo.scale.identity=function(){return hi([0,1])},mo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+Hc,a=u.apply(this,arguments)+Hc,c=(o>a&&(c=o,o=a,a=c),a-o),l=Bo>c?"0":"1",s=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=Fc?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*s+","+i*f+"A"+i+","+i+" 0 "+l+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+l+",0 "+n*s+","+n*f+"Z":"M"+i*s+","+i*f+"A"+i+","+i+" 0 "+l+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=gi,e=pi,r=di,u=vi;return n.innerRadius=function(e){return arguments.length?(t=pt(e),n):t},n.outerRadius=function(t){return arguments.length?(e=pt(t),n):e},n.startAngle=function(t){return arguments.length?(r=pt(t),n):r},n.endAngle=function(t){return arguments.length?(u=pt(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+Hc;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
Hc=-Jo,Fc=Wo-Go;mo.svg.line.radial=function(){var n=Ue(mi);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},We.reverse=Je,Je.reverse=We,mo.svg.area=function(){return yi(dt)},mo.svg.area.radial=function(){var n=yi(mi);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},mo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),l=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,l)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,l.r,l.p0)+r(l.r,l.p1,l.a1-l.a0)+u(l.r,l.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+Hc,s=l.call(n,u,r)+Hc;return{r:i,a0:o,a1:s,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(s),i*Math.sin(s)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Bo)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=De,o=je,a=Mi,c=di,l=vi;return n.radius=function(t){return arguments.length?(a=pt(t),n):a},n.source=function(t){return arguments.length?(i=pt(t),n):i},n.target=function(t){return arguments.length?(o=pt(t),n):o},n.startAngle=function(t){return arguments.length?(c=pt(t),n):c},n.endAngle=function(t){return arguments.length?(l=pt(t),n):l},n},mo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=De,e=je,r=xi;return n.source=function(e){return arguments.length?(t=pt(e),n):t},n.target=function(t){return arguments.length?(e=pt(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},mo.svg.diagonal.radial=function(){var n=mo.svg.diagonal(),t=xi,e=n.projection;return n.projection=function(n){return arguments.length?e(bi(t=n)):t},n},mo.svg.symbol=function(){function n(n,r){return(Pc.get(t.call(this,n,r))||Si)(e.call(this,n,r))}var t=wi,e=_i;return n.type=function(e){return 
arguments.length?(t=pt(e),n):t},n.size=function(t){return arguments.length?(e=pt(t),n):e},n};var Pc=mo.map({circle:Si,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Ic)),e=t*Ic;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/Yc),e=t*Yc/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/Yc),e=t*Yc/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});mo.svg.symbolTypes=Pc.keys();var Oc,Rc,Yc=Math.sqrt(3),Ic=Math.tan(30*Qo),Uc=[],Zc=0;Uc.call=Ro.call,Uc.empty=Ro.empty,Uc.node=Ro.node,Uc.size=Ro.size,mo.transition=function(n){return arguments.length?Oc?n.transition():n:Uo.transition()},mo.transition.prototype=Uc,Uc.select=function(n){var t,e,r,u=this.id,i=[];n=d(n);for(var o=-1,a=this.length;++oi;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a)&&t.push(r)}return Ei(u,this.id)},Uc.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):N(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Uc.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n)) -})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Br:Er,a=mo.ns.qualify(n);return ki(this,"attr."+n,t,a.local?i:u)},Uc.attrTween=function(n,t){function 
e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=mo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Uc.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=_o.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=Er(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return ki(this,"style."+n,t,u)},Uc.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,_o.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Uc.text=function(n){return ki(this,"text",n,Ai)},Uc.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Uc.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=mo.ease.apply(mo,arguments)),N(this,function(e){e.__transition__[t].ease=n}))},Uc.delay=function(n){var t=this.id;return N(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Uc.duration=function(n){var t=this.id;return N(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Uc.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Rc,u=Oc;Oc=e,N(this,function(t,r,u){Rc=t.__transition__[e],n.call(t,t.__data__,r,u)}),Rc=r,Oc=u}else N(this,function(r){var 
u=r.__transition__[e];(u.event||(u.event=mo.dispatch("start","end"))).on(n,t)});return this},Uc.transition=function(){for(var n,t,e,r,u=this.id,i=++Zc,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],l=0,s=t.length;s>l;l++)(e=t[l])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,Ni(e,l,i,r)),n.push(e)}return Ei(o,i)},mo.svg.axis=function(){function n(n){n.each(function(){var n,l=mo.select(this),s=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):dt:t,p=l.selectAll(".tick").data(h,f),d=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Go),v=mo.transition(p.exit()).style("opacity",Go).remove(),m=mo.transition(p).style("opacity",1),y=Bu(f),M=l.selectAll(".domain").data([0]),x=(M.enter().append("path").attr("class","domain"),mo.transition(M));d.append("line"),d.append("text");var b=d.select("line"),_=m.select("line"),w=p.select("text").text(g),S=d.select("text"),E=m.select("text");switch(r){case"bottom":n=Ti,b.attr("y2",u),S.attr("y",Math.max(u,0)+o),_.attr("x2",0).attr("y2",u),E.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),x.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ti,b.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),_.attr("x2",0).attr("y2",-u),E.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),x.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=qi,b.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),_.attr("x2",-u).attr("y2",0),E.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),x.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=qi,b.attr("x2",u),S.attr("x",Math.max(u,0)+o),_.attr("x2",u).attr("y2",0),E.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),x.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var 
k=f.rangeBand()/2,A=function(n){return f(n)+k};d.call(n,A),m.call(n,A)}else d.call(n,s),m.call(n,f),v.call(n,f)})}var t,e=mo.scale.linear(),r=Vc,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in Xc?t+"":Vc,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Vc="bottom",Xc={top:1,right:1,bottom:1,left:1};mo.svg.brush=function(){function n(i){i.each(function(){var i=mo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(v,dt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return $c[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var s,f=mo.transition(i),h=mo.transition(o);c&&(s=Bu(c),h.attr("x",s[0]).attr("width",s[1]-s[0]),e(f)),l&&(s=Bu(l),h.attr("y",s[0]).attr("height",s[1]-s[0]),r(f)),t(f)})}function 
t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+s[+/e$/.test(n)]+","+h[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",s[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",s[1]-s[0])}function r(n){n.select(".extent").attr("y",h[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",h[1]-h[0])}function u(){function u(){32==mo.event.keyCode&&(N||(M=null,q[0]-=s[1],q[1]-=h[1],N=2),f())}function g(){32==mo.event.keyCode&&2==N&&(q[0]+=s[1],q[1]+=h[1],N=0,f())}function v(){var n=mo.mouse(b),u=!1;x&&(n[0]+=x[0],n[1]+=x[1]),N||(mo.event.altKey?(M||(M=[(s[0]+s[1])/2,(h[0]+h[1])/2]),q[0]=s[+(n[0]f?(u=r,r=f):u=f),g[0]!=r||g[1]!=u?(e?o=null:i=null,g[0]=r,g[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),mo.select("body").style("cursor",null),z.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),T(),w({type:"brushend"})}var M,x,b=this,_=mo.select(mo.event.target),w=a.of(b,arguments),S=mo.select(b),E=_.datum(),k=!/^(n|s)$/.test(E)&&c,A=!/^(e|w)$/.test(E)&&l,N=_.classed("extent"),T=L(),q=mo.mouse(b),z=mo.select(_o).on("keydown.brush",u).on("keyup.brush",g);if(mo.event.changedTouches?z.on("touchmove.brush",v).on("touchend.brush",y):z.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),N)q[0]=s[0]-q[0],q[1]=h[0]-q[1];else if(E){var C=+/w$/.test(E),D=+/^n/.test(E);x=[s[1-C]-q[0],h[1-D]-q[1]],q[0]=s[C],q[1]=h[D]}else mo.event.altKey&&(M=q.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),mo.select("body").style("cursor",_.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=g(n,"brushstart","brush","brushend"),c=null,l=null,s=[0,0],h=[0,0],p=!0,d=!0,v=Bc[0];return n.event=function(n){n.each(function(){var 
n=a.of(this,arguments),t={x:s,y:h,i:i,j:o},e=this.__chart__||t;this.__chart__=t,Oc?mo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,s=e.x,h=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=kr(s,t.x),r=kr(h,t.y);return i=o=null,function(u){s=t.x=e(u),h=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,v=Bc[!c<<1|!l],n):c},n.y=function(t){return arguments.length?(l=t,v=Bc[!c<<1|!l],n):l},n.clamp=function(t){return arguments.length?(c&&l?(p=!!t[0],d=!!t[1]):c?p=!!t:l&&(d=!!t),n):c&&l?[p,d]:c?p:l?d:null},n.extent=function(t){var e,r,u,a,f;return arguments.length?(c&&(e=t[0],r=t[1],l&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(f=e,e=r,r=f),(e!=s[0]||r!=s[1])&&(s=[e,r])),l&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],l.invert&&(u=l(u),a=l(a)),u>a&&(f=u,u=a,a=f),(u!=h[0]||a!=h[1])&&(h=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=s[0],r=s[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(f=e,e=r,r=f))),l&&(o?(u=o[0],a=o[1]):(u=h[0],a=h[1],l.invert&&(u=l.invert(u),a=l.invert(a)),u>a&&(f=u,u=a,a=f))),c&&l?[[e,u],[r,a]]:c?[e,r]:l&&[u,a])},n.clear=function(){return n.empty()||(s=[0,0],h=[0,0],i=o=null),n},n.empty=function(){return!!c&&s[0]==s[1]||!!l&&h[0]==h[1]},mo.rebind(n,a,"on")};var $c={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Bc=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Wc=mo.time={},Jc=Date,Gc=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"];zi.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){Kc.setUTCDate.apply(this._,arguments)},setDay:function(){Kc.setUTCDay.apply(this._,arguments)},setFullYear:function(){Kc.setUTCFullYear.apply(this._,arguments)},setHours:function(){Kc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){Kc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){Kc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){Kc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){Kc.setUTCSeconds.apply(this._,arguments)},setTime:function(){Kc.setTime.apply(this._,arguments)}};var Kc=Date.prototype,Qc="%a %b %e %X %Y",nl="%m/%d/%Y",tl="%H:%M:%S",el=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],rl=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],ul=["January","February","March","April","May","June","July","August","September","October","November","December"],il=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];Wc.year=Ci(function(n){return n=Wc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),Wc.years=Wc.year.range,Wc.years.utc=Wc.year.utc.range,Wc.day=Ci(function(n){var t=new Jc(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),Wc.days=Wc.day.range,Wc.days.utc=Wc.day.utc.range,Wc.dayOfYear=function(n){var t=Wc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},Gc.forEach(function(n,t){n=n.toLowerCase(),t=7-t;var 
e=Wc[n]=Ci(function(n){return(n=Wc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=Wc.year(n).getDay();return Math.floor((Wc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});Wc[n+"s"]=e.range,Wc[n+"s"].utc=e.utc.range,Wc[n+"OfYear"]=function(n){var e=Wc.year(n).getDay();return Math.floor((Wc.dayOfYear(n)+(e+t)%7)/7)}}),Wc.week=Wc.sunday,Wc.weeks=Wc.sunday.range,Wc.weeks.utc=Wc.sunday.utc.range,Wc.weekOfYear=Wc.sundayOfYear,Wc.format=ji;var ol=Hi(el),al=Fi(el),cl=Hi(rl),ll=Fi(rl),sl=Hi(ul),fl=Fi(ul),hl=Hi(il),gl=Fi(il),pl=/^%/,dl={"-":"",_:" ",0:"0"},vl={a:function(n){return rl[n.getDay()]},A:function(n){return el[n.getDay()]},b:function(n){return il[n.getMonth()]},B:function(n){return ul[n.getMonth()]},c:ji(Qc),d:function(n,t){return Pi(n.getDate(),t,2)},e:function(n,t){return Pi(n.getDate(),t,2)},H:function(n,t){return Pi(n.getHours(),t,2)},I:function(n,t){return Pi(n.getHours()%12||12,t,2)},j:function(n,t){return Pi(1+Wc.dayOfYear(n),t,3)},L:function(n,t){return Pi(n.getMilliseconds(),t,3)},m:function(n,t){return Pi(n.getMonth()+1,t,2)},M:function(n,t){return Pi(n.getMinutes(),t,2)},p:function(n){return n.getHours()>=12?"PM":"AM"},S:function(n,t){return Pi(n.getSeconds(),t,2)},U:function(n,t){return Pi(Wc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Pi(Wc.mondayOfYear(n),t,2)},x:ji(nl),X:ji(tl),y:function(n,t){return Pi(n.getFullYear()%100,t,2)},Y:function(n,t){return Pi(n.getFullYear()%1e4,t,4)},Z:ao,"%":function(){return"%"}},ml={a:Oi,A:Ri,b:Zi,B:Vi,c:Xi,d:no,e:no,H:eo,I:eo,j:to,L:io,m:Qi,M:ro,p:oo,S:uo,U:Ii,w:Yi,W:Ui,x:$i,X:Bi,y:Ji,Y:Wi,Z:Gi,"%":co},yl=/^\s*\d+/,Ml=mo.map({am:0,pm:1});ji.utc=lo;var xl=lo("%Y-%m-%dT%H:%M:%S.%LZ");ji.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?so:xl,so.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},so.toString=xl.toString,Wc.second=Ci(function(n){return new 
Jc(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),Wc.seconds=Wc.second.range,Wc.seconds.utc=Wc.second.utc.range,Wc.minute=Ci(function(n){return new Jc(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),Wc.minutes=Wc.minute.range,Wc.minutes.utc=Wc.minute.utc.range,Wc.hour=Ci(function(n){var t=n.getTimezoneOffset()/60;return new Jc(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),Wc.hours=Wc.hour.range,Wc.hours.utc=Wc.hour.utc.range,Wc.month=Ci(function(n){return n=Wc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),Wc.months=Wc.month.range,Wc.months.utc=Wc.month.utc.range;var bl=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],_l=[[Wc.second,1],[Wc.second,5],[Wc.second,15],[Wc.second,30],[Wc.minute,1],[Wc.minute,5],[Wc.minute,15],[Wc.minute,30],[Wc.hour,1],[Wc.hour,3],[Wc.hour,6],[Wc.hour,12],[Wc.day,1],[Wc.day,2],[Wc.week,1],[Wc.month,1],[Wc.month,3],[Wc.year,1]],wl=[[ji("%Y"),Vt],[ji("%B"),function(n){return n.getMonth()}],[ji("%b %d"),function(n){return 1!=n.getDate()}],[ji("%a %d"),function(n){return n.getDay()&&1!=n.getDate()}],[ji("%I %p"),function(n){return n.getHours()}],[ji("%I:%M"),function(n){return n.getMinutes()}],[ji(":%S"),function(n){return n.getSeconds()}],[ji(".%L"),function(n){return n.getMilliseconds()}]],Sl=go(wl);_l.year=Wc.year,Wc.scale=function(){return fo(mo.scale.linear(),_l,Sl)};var El={range:function(n,t,e){return mo.range(+n,+t,e).map(ho)}},kl=_l.map(function(n){return[n[0].utc,n[1]]}),Al=[[lo("%Y"),Vt],[lo("%B"),function(n){return n.getUTCMonth()}],[lo("%b %d"),function(n){return 1!=n.getUTCDate()}],[lo("%a %d"),function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],[lo("%I %p"),function(n){return 
n.getUTCHours()}],[lo("%I:%M"),function(n){return n.getUTCMinutes()}],[lo(":%S"),function(n){return n.getUTCSeconds()}],[lo(".%L"),function(n){return n.getUTCMilliseconds()}]],Nl=go(Al);return kl.year=Wc.year.utc,Wc.scale.utc=function(){return fo(mo.scale.linear(),kl,Nl)},mo.text=vt(function(n){return n.responseText}),mo.json=function(n,t){return mt(n,"application/json",po,t)},mo.html=function(n,t){return mt(n,"text/html",vo,t)},mo.xml=vt(function(n){return n.responseXML}),mo}(); \ No newline at end of file diff --git a/web/web.go b/web/web.go index 8e84acd039..cae6f5b660 100644 --- a/web/web.go +++ b/web/web.go @@ -378,13 +378,6 @@ func New(logger log.Logger, o *Options) *Handler { http.Redirect(w, r, path.Join(o.ExternalURL.Path, homePage), http.StatusFound) }) - // The console library examples at 'console_libraries/prom.lib' still depend on old asset files being served under `classic`. - router.Get("/classic/static/*filepath", func(w http.ResponseWriter, r *http.Request) { - r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath")) - fs := server.StaticFileServer(ui.Assets) - fs.ServeHTTP(w, r) - }) - router.Get("/version", h.version) router.Get("/metrics", promhttp.Handler().ServeHTTP) From 6c08f8f5597c781215e1c561c941a32b5100e7d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 12:06:35 +0000 Subject: [PATCH 0390/1397] Bump github.com/ionos-cloud/sdk-go/v6 from 6.2.0 to 6.2.1 Bumps [github.com/ionos-cloud/sdk-go/v6](https://github.com/ionos-cloud/sdk-go) from 6.2.0 to 6.2.1. - [Release notes](https://github.com/ionos-cloud/sdk-go/releases) - [Changelog](https://github.com/ionos-cloud/sdk-go/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/ionos-cloud/sdk-go/compare/v6.2.0...v6.2.1) --- updated-dependencies: - dependency-name: github.com/ionos-cloud/sdk-go/v6 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ecb6efefdb..79313967fc 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/hashicorp/consul/api v1.29.4 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 github.com/hetznercloud/hcloud-go/v2 v2.13.1 - github.com/ionos-cloud/sdk-go/v6 v6.2.0 + github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b diff --git a/go.sum b/go.sum index e898588f1b..f1659d3cd0 100644 --- a/go.sum +++ b/go.sum @@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.2.0 h1:qX7gachC0wJSmFfVRnd+DHmz9AStvVraKcwQ/JokIB4= -github.com/ionos-cloud/sdk-go/v6 v6.2.0/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= +github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= From e8ecc118167f8dc69d909e477a51f185a17fb587 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 Sep 2024 16:39:02 +0100 Subject: [PATCH 0391/1397] [TESTS] Fix up item numbers in TestProtobufParse If an error is thrown 
by the test, these numbers help you see which item is wrong. Signed-off-by: Bryan Boreham --- model/textparse/protobufparse_test.go | 50 +++++++++++++-------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index e323a6cc8f..534addfa9f 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -1995,15 +1995,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "without_quantiles_sum", ), }, - { // 78 + { // 81 m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 79 + { // 82 m: "empty_histogram", typ: model.MetricTypeHistogram, }, - { // 80 + { // 83 m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -2014,15 +2014,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, - { // 81 + { // 84 m: "test_counter_with_createdtimestamp", help: "A counter with a created timestamp.", }, - { // 82 + { // 85 m: "test_counter_with_createdtimestamp", typ: model.MetricTypeCounter, }, - { // 83 + { // 86 m: "test_counter_with_createdtimestamp", v: 42, ct: 1000, @@ -2030,15 +2030,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_counter_with_createdtimestamp", ), }, - { // 84 + { // 87 m: "test_summary_with_createdtimestamp", help: "A summary with a created timestamp.", }, - { // 85 + { // 88 m: "test_summary_with_createdtimestamp", typ: model.MetricTypeSummary, }, - { // 86 + { // 89 m: "test_summary_with_createdtimestamp_count", v: 42, ct: 1000, @@ -2046,7 +2046,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_summary_with_createdtimestamp_count", ), }, - { // 87 + { // 90 m: "test_summary_with_createdtimestamp_sum", v: 1.234, ct: 1000, @@ -2054,15 +2054,15 @@ func TestProtobufParse(t *testing.T) { "__name__", 
"test_summary_with_createdtimestamp_sum", ), }, - { // 88 + { // 91 m: "test_histogram_with_createdtimestamp", help: "A histogram with a created timestamp.", }, - { // 89 + { // 92 m: "test_histogram_with_createdtimestamp", typ: model.MetricTypeHistogram, }, - { // 90 + { // 93 m: "test_histogram_with_createdtimestamp", ct: 1000, shs: &histogram.Histogram{ @@ -2074,15 +2074,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_createdtimestamp", ), }, - { // 91 + { // 94 m: "test_gaugehistogram_with_createdtimestamp", help: "A gauge histogram with a created timestamp.", }, - { // 92 + { // 95 m: "test_gaugehistogram_with_createdtimestamp", typ: model.MetricTypeGaugeHistogram, }, - { // 93 + { // 96 m: "test_gaugehistogram_with_createdtimestamp", ct: 1000, shs: &histogram.Histogram{ @@ -2094,15 +2094,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_gaugehistogram_with_createdtimestamp", ), }, - { // 94 + { // 97 m: "test_histogram_with_native_histogram_exemplars", help: "A histogram with native histogram exemplars.", }, - { // 95 + { // 98 m: "test_histogram_with_native_histogram_exemplars", typ: model.MetricTypeHistogram, }, - { // 96 + { // 99 m: "test_histogram_with_native_histogram_exemplars", t: 1234568, shs: &histogram.Histogram{ @@ -2130,7 +2130,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, - { // 97 + { // 100 m: "test_histogram_with_native_histogram_exemplars_count", t: 1234568, v: 175, @@ -2138,7 +2138,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_native_histogram_exemplars_count", ), }, - { // 98 + { // 101 m: "test_histogram_with_native_histogram_exemplars_sum", t: 1234568, v: 0.0008280461746287094, @@ -2146,7 +2146,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_native_histogram_exemplars_sum", ), }, - { // 99 + { // 102 m: 
"test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", t: 1234568, v: 2, @@ -2155,7 +2155,7 @@ func TestProtobufParse(t *testing.T) { "le", "-0.0004899999999999998", ), }, - { // 100 + { // 103 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", t: 1234568, v: 4, @@ -2167,7 +2167,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 101 + { // 104 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", t: 1234568, v: 16, @@ -2179,7 +2179,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 102 + { // 105 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", t: 1234568, v: 175, From 3aaf2c3c9d82c24a0ab8cacd5c68a67d995c312b Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 Sep 2024 16:39:45 +0100 Subject: [PATCH 0392/1397] [TESTS] Add second native histogram with exemplars in TestProtobufParse This test fails. Signed-off-by: Bryan Boreham --- model/textparse/protobufparse_test.go | 186 ++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 534addfa9f..cf34ae52df 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -695,6 +695,70 @@ metric: < timestamp_ms: 1234568 > +`, + + `name: "test_histogram_with_native_histogram_exemplars2" +help: "Another histogram with native histogram exemplars." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + timestamp_ms: 1234568 +> + `, } @@ -1276,6 +1340,41 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, + { + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: 
labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, }, }, { @@ -2188,6 +2287,93 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { // 106 + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { // 107 + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { // 108 + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 109 + m: "test_histogram_with_native_histogram_exemplars2_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_count", + ), + }, + { // 110 + m: "test_histogram_with_native_histogram_exemplars2_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_sum", + ), + }, + { // 111 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 112 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + 
lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0003899999999999998", + ), + }, + { // 113 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0002899999999999998", + ), + }, + { // 114 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "+Inf", + ), + }, }, }, } From d599c4b28c406c95051de5e4905e17203fca0d70 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 Sep 2024 16:42:48 +0100 Subject: [PATCH 0393/1397] [BUGFIX] Protobuf scraping: reset exemplar position Signed-off-by: Bryan Boreham --- model/textparse/protobufparse.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index ea3a2e1a34..d7cbd3519a 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -409,6 +409,7 @@ func (p *ProtobufParser) Next() (Entry, error) { switch p.state { case EntryInvalid: p.metricPos = 0 + p.exemplarPos = 0 p.fieldPos = -2 n, err := readDelimited(p.in[p.inPos:], p.mf) p.inPos += n @@ -485,6 +486,7 @@ func (p *ProtobufParser) Next() (Entry, error) { p.metricPos++ p.fieldPos = -2 p.fieldsDone = false + p.exemplarPos = 0 // If this is a metric family containing native // histograms, we have to switch back to native // histograms after parsing a classic histogram. From ce7d830f1f99f3b00c9d6d50ae8e816a1a542d14 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Mon, 2 Sep 2024 18:20:10 +0200 Subject: [PATCH 0394/1397] Bring back BenchmarkLoadRealWLs (#14757) This was part of #14525 which was reverted. I still think that having this benchmark committed in to the repo is useful. 
Signed-off-by: Oleg Zaytsev --- tsdb/head_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index a7a8847e80..b32b37fcad 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -438,6 +438,32 @@ func BenchmarkLoadWLs(b *testing.B) { } } +// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set. +// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located. +func BenchmarkLoadRealWLs(b *testing.B) { + dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR") + if dir == "" { + b.SkipNow() + } + + wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone) + require.NoError(b, err) + b.Cleanup(func() { wal.Close() }) + + wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone) + require.NoError(b, err) + b.Cleanup(func() { wbl.Close() }) + + // Load the WAL. + for i := 0; i < b.N; i++ { + opts := DefaultHeadOptions() + opts.ChunkDirRoot = dir + h, err := NewHead(nil, nil, wal, wbl, opts, nil) + require.NoError(b, err) + require.NoError(b, h.Init(0)) + } +} + // TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples, // this means in total it generates 4000 chunks because with a step of 15s there are 4 chunks per block per series. 
// While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the From 442f24e09927957936dd2a5b2c5c0817da0674cf Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Mon, 2 Sep 2024 17:30:37 -0300 Subject: [PATCH 0395/1397] chore: Simplify TestHeadAppender_AppendCTZeroSample (#14812) Signed-off-by: Arthur Silva Sens --- tsdb/head_test.go | 81 +++++++++++++++++++++-------------------------- 1 file changed, 36 insertions(+), 45 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index b32b37fcad..18ec4f0aca 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -33,7 +33,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -5945,16 +5944,16 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { for _, tc := range []struct { name string appendableSamples []appendableSamples - expectedSamples []model.Sample + expectedSamples []chunks.Sample }{ { name: "In order ct+normal sample", appendableSamples: []appendableSamples{ {ts: 100, val: 10, ct: 1}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, }, }, { @@ -5963,10 +5962,10 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { {ts: 100, val: 10, ct: 1}, {ts: 101, val: 10, ct: 1}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, - {Timestamp: 101, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, }, }, { @@ -5975,11 +5974,11 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { {ts: 100, val: 10, ct: 1}, {ts: 102, val: 10, ct: 101}, }, - expectedSamples: 
[]model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, - {Timestamp: 101, Value: 0}, - {Timestamp: 102, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 0}, + sample{t: 102, f: 10}, }, }, { @@ -5988,41 +5987,33 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { {ts: 100, val: 10, ct: 1}, {ts: 101, val: 10, ct: 100}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, - {Timestamp: 101, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, }, }, } { - h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) - defer func() { - require.NoError(t, h.Close()) - }() - a := h.Appender(context.Background()) - lbls := labels.FromStrings("foo", "bar") - for _, sample := range tc.appendableSamples { - _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) - require.NoError(t, err) - _, err = a.Append(0, lbls, sample.ts, sample.val) - require.NoError(t, err) - } - require.NoError(t, a.Commit()) + t.Run(tc.name, func(t *testing.T) { + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + a := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + for _, sample := range tc.appendableSamples { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.val) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) - q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) - require.NoError(t, err) - ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.True(t, ss.Next()) - s := ss.At() - require.False(t, ss.Next()) - it := s.Iterator(nil) - for _, sample := range tc.expectedSamples { - require.Equal(t, 
chunkenc.ValFloat, it.Next()) - timestamp, value := it.At() - require.Equal(t, sample.Timestamp, model.Time(timestamp)) - require.Equal(t, sample.Value, model.SampleValue(value)) - } - require.Equal(t, chunkenc.ValNone, it.Next()) + q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`]) + }) } } From f1d3c3f46468e97f5bad0aa0004f9e5775eb3429 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Tue, 3 Sep 2024 08:33:44 +0200 Subject: [PATCH 0396/1397] fix: Properly put OM text p.CreatedTimestamp behind feature flag (#14809) (#14815) Mitigates https://github.com/prometheus/prometheus/issues/14808 Signed-off-by: Manik Rana Co-authored-by: Manik Rana --- scrape/scrape.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 5fcd623c0d..2abd4691d6 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1655,12 +1655,14 @@ loop: if seriesAlreadyScraped && parsedTimestamp == nil { err = storage.ErrDuplicateSampleForTimestamp } else { - if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) - if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. - // CT is an experimental feature. For now, we don't need to fail the - // scrape on errors updating the created timestamp, log debug. - level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + if sl.enableCTZeroIngestion { + if ctMs := p.CreatedTimestamp(); ctMs != nil { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. + // CT is an experimental feature. 
For now, we don't need to fail the + // scrape on errors updating the created timestamp, log debug. + level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + } } } From cfe8b5616f5d1e26b0e5ef6138cc1924ed829142 Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 3 Sep 2024 11:36:58 +0200 Subject: [PATCH 0397/1397] Only update hash when config reload succeeds Signed-off-by: Julien --- cmd/prometheus/main.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 26f96db53b..3840c94155 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1163,12 +1163,12 @@ func main() { rc <- err } else { rc <- nil - } - if cfg.enableAutoReload { - if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { - checksum = currentChecksum - } else { - level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + if cfg.enableAutoReload { + if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { + checksum = currentChecksum + } else { + level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) + } } } case <-time.Tick(time.Duration(cfg.autoReloadInterval)): From 2c553b17729775448f37f5a2d3b1f7c6aed2b149 Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 3 Sep 2024 11:37:14 +0200 Subject: [PATCH 0398/1397] autoreload: do not ignore errors Signed-off-by: Julien --- config/reload.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/config/reload.go b/config/reload.go index 2f03ca463e..8be1b28d8a 100644 --- a/config/reload.go +++ b/config/reload.go @@ -36,7 +36,10 @@ func GenerateChecksum(yamlFilePath string) (string, error) { if err != nil { return "", fmt.Errorf("error reading YAML file: %w", err) } - _, _ = hash.Write(yamlContent) + _, err = 
hash.Write(yamlContent) + if err != nil { + return "", fmt.Errorf("error writing YAML file to hash: %w", err) + } var config ExternalFilesConfig if err := yaml.Unmarshal(yamlContent, &config); err != nil { @@ -61,23 +64,26 @@ func GenerateChecksum(yamlFilePath string) (string, error) { for _, pattern := range files[prefix] { matchingFiles, err := filepath.Glob(pattern) if err != nil { - return "", fmt.Errorf("error finding files with pattern %s: %w", pattern, err) + return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err) } for _, file := range matchingFiles { - // Write prefix to the hash ("r" or "s") followed by \0. - _, _ = hash.Write([]byte(prefix + "\x00")) - - // Write the file path to the hash, followed by \0 to ensure - // separation. - _, _ = hash.Write([]byte(file + "\x00")) + // Write prefix to the hash ("r" or "s") followed by \0, then + // the file path. + _, err = hash.Write([]byte(prefix + "\x00" + file + "\x00")) + if err != nil { + return "", fmt.Errorf("error writing %q path to hash: %w", file, err) + } // Read and hash the content of the file. content, err := os.ReadFile(file) if err != nil { return "", fmt.Errorf("error reading file %s: %w", file, err) } - _, _ = hash.Write(append(content, []byte("\x00")...)) + _, err = hash.Write(append(content, []byte("\x00")...)) + if err != nil { + return "", fmt.Errorf("error writing %q content to hash: %w", file, err) + } } } } From 1f02c2177b0ef86301ceb534ae4388fef63809b5 Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 3 Sep 2024 11:37:29 +0200 Subject: [PATCH 0399/1397] Use t.Tempdir Signed-off-by: Julien --- config/reload_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/config/reload_test.go b/config/reload_test.go index 0d86309114..f0f44f3588 100644 --- a/config/reload_test.go +++ b/config/reload_test.go @@ -22,10 +22,7 @@ import ( ) func TestGenerateChecksum(t *testing.T) { - // Create a temporary directory to hold the test files. 
- tmpDir, err := os.MkdirTemp("", "checksum_test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) // Clean up. + tmpDir := t.TempDir() // Define paths for the temporary files. yamlFilePath := filepath.Join(tmpDir, "test.yml") From e896643403412eda013c2e11c68939290ccff53e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:14:58 +0000 Subject: [PATCH 0400/1397] Bump actions/setup-node from 4.0.2 to 4.0.3 Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.0.2 to 4.0.3. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/60edb5dd545a775178f52524783378180af0d1f8...1e60f620b9541d16bece96c5465dc8ee9832be0b) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d314ac50ad..5934a3dafe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -231,7 +231,7 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - name: Install nodejs - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" From 0e376d1d6bce6ab2b9c7bf17e64aef137030d660 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:19:29 +0000 Subject: [PATCH 0401/1397] Bump github.com/influxdata/influxdb Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.5 to 1.11.6. 
- [Release notes](https://github.com/influxdata/influxdb/releases) - [Commits](https://github.com/influxdata/influxdb/compare/v1.11.5...v1.11.6) --- updated-dependencies: - dependency-name: github.com/influxdata/influxdb dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index e5e052469b..22a292db0f 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -7,7 +7,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.5 + github.com/influxdata/influxdb v1.11.6 github.com/prometheus/client_golang v1.20.0 github.com/prometheus/common v0.57.0 github.com/prometheus/prometheus v0.53.1 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 34c474ef89..8f71c3c4a4 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI= -github.com/influxdata/influxdb v1.11.5/go.mod h1:k8sWREQl1/9t46VrkrH5adUM4UNGIt206ipO3plbkw8= +github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU= +github.com/influxdata/influxdb v1.11.6/go.mod 
h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= From 4c9fca8719c1f820b45e310207832937f00eec6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:29:22 +0000 Subject: [PATCH 0402/1397] Bump google.golang.org/grpc from 1.65.0 to 1.66.0 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.65.0 to 1.66.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.65.0...v1.66.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 5 +++-- go.sum | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 9a4a35c4d1..57a0ab383e 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/digitalocean/godo v1.119.0 github.com/docker/docker v27.1.1+incompatible github.com/edsrzf/mmap-go v1.1.0 - github.com/envoyproxy/go-control-plane v0.12.0 + github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.7.0 @@ -83,7 +83,7 @@ require ( golang.org/x/tools v0.23.0 google.golang.org/api v0.190.0 google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -176,6 +176,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect 
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index 7e763af265..0573aa6047 100644 --- a/go.sum +++ b/go.sum @@ -168,8 +168,8 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 h1:IgJPqnrlY2Mr4pYB6oaMKvFvwJ9H+X6CCY5x1vCTcpc= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= @@ -592,6 +592,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod 
h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1107,8 +1109,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From deabb30b207c22eeda4cdfd15e15529b7c98c55b Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 3 Sep 2024 11:18:49 +0200 Subject: [PATCH 0403/1397] Document custom HTTP headers and add tests Signed-off-by: Julien --- config/config_test.go | 11 +- config/testdata/conf.good.yml | 6 + docs/configuration/configuration.md | 288 
++++++++++++++++++++++++++++ go.mod | 14 +- go.sum | 28 +-- 5 files changed, 325 insertions(+), 22 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 2219061823..7b910b5d18 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -225,6 +225,15 @@ var expectedConf = &Config{ TLSConfig: config.TLSConfig{ MinVersion: config.TLSVersion(tls.VersionTLS10), }, + HTTPHeaders: &config.Headers{ + Headers: map[string]config.Header{ + "foo": { + Values: []string{"foobar"}, + Secrets: []config.Secret{"bar", "foo"}, + Files: []string{filepath.FromSlash("testdata/valid_password_file")}, + }, + }, + }, }, ServiceDiscoveryConfigs: discovery.Configs{ @@ -1532,7 +1541,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Len(t, matches, 22, "wrong number of secret matches found") + require.Len(t, matches, 24, "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 56741822c2..a826282d2c 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -87,6 +87,12 @@ scrape_configs: my: label your: label + http_headers: + foo: + values: ["foobar"] + secrets: ["bar", "foo"] + files: ["valid_password_file"] + relabel_configs: - source_labels: [job, __meta_dns_name] regex: (.*)some-[regex] diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index a42126cf22..1b205d1263 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -608,6 +608,18 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. 
+ [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` ### `` @@ -699,6 +711,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -812,6 +836,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -899,6 +935,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -957,6 +1005,18 @@ host: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] 
] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -1137,6 +1197,18 @@ host: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -1346,6 +1418,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1623,6 +1707,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1849,6 +1945,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] 
] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1943,6 +2051,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2026,6 +2146,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2260,6 +2392,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2351,6 +2495,18 @@ server: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. 
Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -2478,6 +2634,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2567,6 +2735,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2674,6 +2854,18 @@ tls_config: # Specifies headers to send to proxies during CONNECT requests. [ proxy_connect_header: [ : [, ...] ] ] + +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] ``` By default every app listed in Marathon will be scraped by Prometheus. If not all @@ -2773,6 +2965,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. 
+ [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -2959,6 +3163,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3085,6 +3301,18 @@ tags_filter: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # TLS configuration. tls_config: [ ] @@ -3161,6 +3389,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3243,6 +3483,18 @@ oauth2: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. 
+ [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3468,6 +3720,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3731,6 +3995,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -3852,6 +4128,18 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] + # Configure whether HTTP requests follow HTTP 3xx redirects. 
[ follow_redirects: | default = true ] diff --git a/go.mod b/go.mod index 9a4a35c4d1..1a9cc9555a 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.2 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.56.0 + github.com/prometheus/common v0.58.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.11.0 @@ -76,9 +76,9 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.22.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - golang.org/x/text v0.16.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.23.0 + golang.org/x/text v0.17.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 google.golang.org/api v0.190.0 @@ -186,11 +186,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.25.0 // indirect + golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/term v0.22.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/term v0.23.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 7e763af265..01676cb4d3 100644 --- a/go.sum +++ b/go.sum @@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= 
-github.com/prometheus/common v0.56.0 h1:UffReloqkBtvtQEYDg2s+uDPGRrJyC6vZWPGXf6OhPY= -github.com/prometheus/common v0.56.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= +github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo= +github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -774,8 +774,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -857,8 +857,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= 
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -880,8 +880,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -947,16 +947,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -968,8 +968,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 87420774980288cc4d9c59342bf9ff278db8c04c Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 Sep 2024 20:20:40 +0100 Subject: [PATCH 0404/1397] [BUGFIX] PromQL: pass Context so spans parent correctly Assigning to `evaluator.ctx` in `eval()` broke the parent-child relationship. Signed-off-by: Bryan Boreham --- promql/engine.go | 103 +++++++++++++++++++++----------------------- promql/functions.go | 9 ++-- 2 files changed, 53 insertions(+), 59 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index b54ce2d6dc..537eafb41e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -713,7 +713,6 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval startTimestamp: start, endTimestamp: start, interval: 1, - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: s.LookbackDelta, @@ -723,7 +722,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } query.sampleStats.InitStepTracking(start, start, 1) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -772,7 +771,6 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval startTimestamp: timeMilliseconds(s.Start), endTimestamp: timeMilliseconds(s.End), interval: durationMilliseconds(s.Interval), - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: 
s.LookbackDelta, @@ -781,7 +779,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval enableDelayedNameRemoval: ng.enableDelayedNameRemoval, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -1029,8 +1027,6 @@ func (e errWithWarnings) Error() string { return e.err.Error() } // querier and reports errors. On timeout or cancellation of its context it // terminates. type evaluator struct { - ctx context.Context - startTimestamp int64 // Start time in milliseconds. endTimestamp int64 // End time in milliseconds. interval int64 // Interval in milliseconds. @@ -1079,10 +1075,10 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { +func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) - v, ws = ev.eval(expr) + v, ws = ev.eval(ctx, expr) if ev.enableDelayedNameRemoval { ev.cleanupMetricLabels(v) } @@ -1133,7 +1129,7 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. 
-func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1144,7 +1140,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) warnings.Merge(ws) matrixes[i] = val.(Matrix) @@ -1196,7 +1192,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. 
@@ -1306,7 +1302,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) return mat, warnings } -func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { // Keep a copy of the original point slice so that it can be returned to the pool. origMatrix := slices.Clone(inputMatrix) defer func() { @@ -1386,7 +1382,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. @@ -1437,11 +1433,11 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { +func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. 
ev.samplesStats = ev.samplesStats.NewChild() - val, ws := ev.eval(subq) + val, ws := ev.eval(ctx, subq) // But do incorporate the peak from the subquery samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats @@ -1468,17 +1464,16 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { +func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 // Create a new span to help investigate inner evaluation performances. 
- ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) - ev.ctx = ctxWithSpan + ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) defer span.End() switch e := expr.(type) { @@ -1500,7 +1495,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) }, e.Expr) } @@ -1510,16 +1505,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // param is the number k for topk/bottomk, or q for quantile. var fParam float64 if param != nil { - val, ws := ev.eval(param) + val, ws := ev.eval(ctx, param) warnings.Merge(ws) fParam = val.(Matrix)[0].Floats[0].F } // Now fetch the data to be aggregated. 
- val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) warnings.Merge(ws) inputMatrix := val.(Matrix) - result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam) + result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam) warnings.Merge(ws) ev.currentSamples = originalNumSamples + result.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1537,7 +1532,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) + return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e) } } @@ -1561,7 +1556,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio matrixArgIndex = i matrixArg = true // Replacing parser.SubqueryExpr with parser.MatrixSelector. - val, totalSamples, ws := ev.evalSubquery(subq) + val, totalSamples, ws := ev.evalSubquery(ctx, subq) e.Args[i] = val warnings.Merge(ws) defer func() { @@ -1576,14 +1571,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // Special handling for functions that work on series not samples. switch e.Func.Name { case "label_replace": - return ev.evalLabelReplace(e.Args) + return ev.evalLabelReplace(ctx, e.Args) case "label_join": - return ev.evalLabelJoin(e.Args) + return ev.evalLabelJoin(ctx, e.Args) } if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, annos := call(v, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) 
@@ -1595,7 +1590,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio otherInArgs := make([]Vector, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] @@ -1609,7 +1604,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sel := arg.(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) - ws, err := checkAndExpandSeriesSet(ev.ctx, sel) + ws, err := checkAndExpandSeriesSet(ctx, sel) warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) @@ -1639,7 +1634,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio dropName := e.Func.Name != "last_over_time" for i, s := range selVS.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } ev.currentSamples -= len(floats) + totalHPointSize(histograms) @@ -1785,10 +1780,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio return mat, warnings case *parser.ParenExpr: - return ev.eval(e.Expr) + return ev.eval(ctx, e.Expr) case *parser.UnaryExpr: - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) mat := val.(Matrix) if e.Op == parser.SUB { for i := range mat { @@ -1809,7 +1804,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1822,39 +1817,39 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) return vec, handleVectorBinopError(err, e) }, e.LHS, 
e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -1862,7 +1857,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio return String{V: e.Val, T: ev.startTimestamp}, nil case *parser.VectorSelector: - ws, err := checkAndExpandSeriesSet(ev.ctx, e) + ws, err := checkAndExpandSeriesSet(ctx, e) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -1871,7 +1866,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) var chkIter chunkenc.Iterator for i, s := range e.Series { 
- if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) @@ -1922,14 +1917,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio if ev.startTimestamp != ev.endTimestamp { panic(errors.New("cannot do range evaluation of matrix selector")) } - return ev.matrixSelector(e) + return ev.matrixSelector(ctx, e) case *parser.SubqueryExpr: offsetMillis := durationMilliseconds(e.Offset) rangeMillis := durationMilliseconds(e.Range) newEv := &evaluator{ endTimestamp: ev.endTimestamp - offsetMillis, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, @@ -1959,7 +1953,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio setOffsetForAtModifier(newEv.startTimestamp, e.Expr) } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples) @@ -1967,14 +1961,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.StepInvariantExpr: switch ce := e.Expr.(type) { case *parser.StringLiteral, *parser.NumberLiteral: - return ev.eval(ce) + return ev.eval(ctx, ce) } newEv := &evaluator{ startTimestamp: ev.startTimestamp, endTimestamp: ev.startTimestamp, // Always a single evaluation. 
interval: ev.interval, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, @@ -1983,7 +1976,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -2059,8 +2052,8 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) { return getFPointSlice(numSteps) } -func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { - ws, err := checkAndExpandSeriesSet(ev.ctx, vs) +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { + ws, err := checkAndExpandSeriesSet(ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2071,7 +2064,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. 
@@ -2207,7 +2200,7 @@ func putMatrixSelectorHPointSlice(p []HPoint) { } // matrixSelector evaluates a *parser.MatrixSelector expression. -func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) { +func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -2218,7 +2211,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota it = storage.NewBuffer(durationMilliseconds(node.Range)) ) - ws, err := checkAndExpandSeriesSet(ev.ctx, node) + ws, err := checkAndExpandSeriesSet(ctx, node) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2226,7 +2219,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) diff --git a/promql/functions.go b/promql/functions.go index 9fa7fbe190..8141d2a9e6 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -14,6 +14,7 @@ package promql import ( + "context" "errors" "fmt" "math" @@ -1463,7 +1464,7 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp } // label_replace function operates only on series; does not look at timestamps or values. 
-func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) { +func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( dst = stringFromArg(args[1]) repl = stringFromArg(args[2]) @@ -1479,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } - val, ws := ev.eval(args[0]) + val, ws := ev.eval(ctx, args[0]) matrix := val.(Matrix) lb := labels.NewBuilder(labels.EmptyLabels()) @@ -1520,7 +1521,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } // label_join function operates only on series; does not look at timestamps or values. -func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) { +func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( dst = stringFromArg(args[1]) sep = stringFromArg(args[2]) @@ -1537,7 +1538,7 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) } - val, ws := ev.eval(args[0]) + val, ws := ev.eval(ctx, args[0]) matrix := val.(Matrix) srcVals := make([]string, len(srcLabels)) lb := labels.NewBuilder(labels.EmptyLabels()) From 54989ce6fd6f25feb334cb4610e9e65ec700755e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 Sep 2024 21:17:45 +0100 Subject: [PATCH 0405/1397] PromQL: add short string method To be used in tracing or logging. 
Signed-off-by: Bryan Boreham --- promql/parser/printer.go | 46 ++++++++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 69f5f082d6..63b1950827 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -72,6 +72,11 @@ func (node *AggregateExpr) String() string { return aggrString } +func (node *AggregateExpr) ShortString() string { + aggrString := node.getAggOpStr() + return aggrString +} + func (node *AggregateExpr) getAggOpStr() string { aggrString := node.Op.String() @@ -95,14 +100,20 @@ func joinLabels(ss []string) string { return strings.Join(ss, ", ") } -func (node *BinaryExpr) String() string { - returnBool := "" +func (node *BinaryExpr) returnBool() string { if node.ReturnBool { - returnBool = " bool" + return " bool" } + return "" +} +func (node *BinaryExpr) String() string { matching := node.getMatchingStr() - return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) + return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, node.returnBool(), matching, node.RHS) +} + +func (node *BinaryExpr) ShortString() string { + return fmt.Sprintf("%s%s%s", node.Op, node.returnBool(), node.getMatchingStr()) } func (node *BinaryExpr) getMatchingStr() string { @@ -130,9 +141,13 @@ func (node *Call) String() string { return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args) } -func (node *MatrixSelector) String() string { +func (node *Call) ShortString() string { + return node.Func.Name +} + +func (node *MatrixSelector) atOffset() (string, string) { // Copy the Vector selector before changing the offset - vecSelector := *node.VectorSelector.(*VectorSelector) + vecSelector := node.VectorSelector.(*VectorSelector) offset := "" switch { case vecSelector.OriginalOffset > time.Duration(0): @@ -149,7 +164,13 @@ func (node *MatrixSelector) String() string { case vecSelector.StartOrEnd == END: at = " @ end()" } + return at, offset +} +func 
(node *MatrixSelector) String() string { + at, offset := node.atOffset() + // Copy the Vector selector before changing the offset + vecSelector := *node.VectorSelector.(*VectorSelector) // Do not print the @ and offset twice. offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd vecSelector.OriginalOffset = 0 @@ -163,10 +184,19 @@ func (node *MatrixSelector) String() string { return str } +func (node *MatrixSelector) ShortString() string { + at, offset := node.atOffset() + return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset) +} + func (node *SubqueryExpr) String() string { return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix()) } +func (node *SubqueryExpr) ShortString() string { + return node.getSubqueryTimeSuffix() +} + // getSubqueryTimeSuffix returns the '[:] @ offset ' suffix of the subquery. func (node *SubqueryExpr) getSubqueryTimeSuffix() string { step := "" @@ -208,6 +238,10 @@ func (node *UnaryExpr) String() string { return fmt.Sprintf("%s%s", node.Op, node.Expr) } +func (node *UnaryExpr) ShortString() string { + return node.Op.String() +} + func (node *VectorSelector) String() string { var labelStrings []string if len(node.LabelMatchers) > 1 { From abb0502685ee7ee745c84fa07f246f158fe8b170 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 2 Sep 2024 13:38:05 +0100 Subject: [PATCH 0406/1397] [ENHANCEMENT] PromQL: Add detail to tracing spans For aggregates, operators, calls, show what operation is performed. Also add an event when series are expanded, typically time spent accessing TSDB. 
Signed-off-by: Bryan Boreham --- promql/engine.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index 537eafb41e..3094ca3b29 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -991,6 +991,8 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations if e.Series != nil { return nil, nil } + span := trace.SpanFromContext(ctx) + span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String()))) series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet) if e.SkipHistogramBuckets { for i := range series { @@ -998,6 +1000,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations } } e.Series = series + span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series)))) return ws, err } return nil, nil @@ -1475,6 +1478,9 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Create a new span to help investigate inner evaluation performances. 
ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) defer span.End() + if ss, ok := expr.(interface{ ShortString() string }); ok { + span.SetAttributes(attribute.String("operation", ss.ShortString())) + } switch e := expr.(type) { case *parser.AggregateExpr: @@ -1849,11 +1855,13 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, } case *parser.NumberLiteral: + span.SetAttributes(attribute.Float64("value", e.Val)) return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: + span.SetAttributes(attribute.String("value", e.Val)) return String{V: e.Val, T: ev.startTimestamp}, nil case *parser.VectorSelector: From 9e4ffd044c86aced974dfe359aa08992a05027e8 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 3 Sep 2024 15:51:32 +0200 Subject: [PATCH 0407/1397] Implement Alertmanagers discovery page Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 9 ++ .../src/api/responseTypes/alertmanagers.ts | 10 ++ .../src/pages/AlertmanagerDiscoveryPage.tsx | 115 ++++++++++++++++++ .../ServiceDiscoveryPoolsList.tsx | 1 - .../pages/service-discovery/TargetLabels.tsx | 49 -------- 5 files changed, 134 insertions(+), 50 deletions(-) create mode 100644 web/ui/mantine-ui/src/api/responseTypes/alertmanagers.ts create mode 100644 web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx delete mode 100644 web/ui/mantine-ui/src/pages/service-discovery/TargetLabels.tsx diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 02160e460b..2ca61761fa 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -20,6 +20,7 @@ import { } from "@mantine/core"; import { useDisclosure } from "@mantine/hooks"; import { + IconBell, IconBellFilled, IconChevronDown, 
IconChevronRight, @@ -62,6 +63,7 @@ import ReadinessWrapper from "./components/ReadinessWrapper"; import { QueryParamProvider } from "use-query-params"; import { ReactRouter6Adapter } from "use-query-params/adapters/react-router-6"; import ServiceDiscoveryPage from "./pages/service-discovery/ServiceDiscoveryPage"; +import AlertmanagerDiscoveryPage from "./pages/AlertmanagerDiscoveryPage"; const queryClient = new QueryClient(); @@ -106,6 +108,13 @@ const monitoringStatusPages = [ element: , inAgentMode: true, }, + { + title: "Alertmanager discovery", + path: "/discovered-alertmanagers", + icon: , + element: , + inAgentMode: false, + }, ]; const serverStatusPages = [ diff --git a/web/ui/mantine-ui/src/api/responseTypes/alertmanagers.ts b/web/ui/mantine-ui/src/api/responseTypes/alertmanagers.ts new file mode 100644 index 0000000000..2fcb8c23a3 --- /dev/null +++ b/web/ui/mantine-ui/src/api/responseTypes/alertmanagers.ts @@ -0,0 +1,10 @@ +export type AlertmanagerTarget = { + url: string; +}; + +// Result type for /api/v1/alertmanagers endpoint. 
+// See: https://prometheus.io/docs/prometheus/latest/querying/api/#alertmanagers +export type AlertmanagersResult = { + activeAlertmanagers: AlertmanagerTarget[]; + droppedAlertmanagers: AlertmanagerTarget[]; +}; diff --git a/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx b/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx new file mode 100644 index 0000000000..8fd12511ee --- /dev/null +++ b/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx @@ -0,0 +1,115 @@ +import { + ActionIcon, + Alert, + Box, + Card, + Group, + Select, + Skeleton, + Stack, + Table, + Text, +} from "@mantine/core"; +import { + IconBell, + IconBellOff, + IconInfoCircle, + IconLayoutNavbarCollapse, + IconLayoutNavbarExpand, + IconSearch, +} from "@tabler/icons-react"; +import { Suspense } from "react"; +import { useAppDispatch, useAppSelector } from "../state/hooks"; + +import { + ArrayParam, + StringParam, + useQueryParam, + withDefault, +} from "use-query-params"; +import ErrorBoundary from "../components/ErrorBoundary"; +import ScrapePoolList from "./ServiceDiscoveryPoolsList"; +import { useSuspenseAPIQuery } from "../api/api"; +import { ScrapePoolsResult } from "../api/responseTypes/scrapePools"; +import { + setCollapsedPools, + setShowLimitAlert, +} from "../state/serviceDiscoveryPageSlice"; +import { StateMultiSelect } from "../components/StateMultiSelect"; +import badgeClasses from "../Badge.module.css"; +import { AlertmanagersResult } from "../api/responseTypes/alertmanagers"; +import EndpointLink from "../components/EndpointLink"; + +export const targetPoolDisplayLimit = 20; + +export default function AlertmanagerDiscoveryPage() { + // Load the list of all available scrape pools. + const { + data: { + data: { activeAlertmanagers, droppedAlertmanagers }, + }, + } = useSuspenseAPIQuery({ + path: `/alertmanagers`, + }); + + return ( + + + + + + Active Alertmanagers + + + {activeAlertmanagers.length === 0 ? ( + }> + No active alertmanagers found. 
+ + ) : ( + + + {activeAlertmanagers.map((alertmanager) => ( + + + + + + ))} + +
+ )} +
+ + + + + Dropped Alertmanagers + + + {droppedAlertmanagers.length === 0 ? ( + }> + No dropped alertmanagers found. + + ) : ( + + + {droppedAlertmanagers.map((alertmanager) => ( + + + + + + ))} + +
+ )} +
+
+ ); +} diff --git a/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx b/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx index 537468944a..d29808d99a 100644 --- a/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx @@ -25,7 +25,6 @@ import { } from "../../state/serviceDiscoveryPageSlice"; import CustomInfiniteScroll from "../../components/CustomInfiniteScroll"; -import TargetLabels from "./TargetLabels"; import { useDebouncedValue } from "@mantine/hooks"; import { targetPoolDisplayLimit } from "./ServiceDiscoveryPage"; import { BooleanParam, useQueryParam, withDefault } from "use-query-params"; diff --git a/web/ui/mantine-ui/src/pages/service-discovery/TargetLabels.tsx b/web/ui/mantine-ui/src/pages/service-discovery/TargetLabels.tsx deleted file mode 100644 index 54e944f8cf..0000000000 --- a/web/ui/mantine-ui/src/pages/service-discovery/TargetLabels.tsx +++ /dev/null @@ -1,49 +0,0 @@ -import { FC } from "react"; -import { Labels } from "../../api/responseTypes/targets"; -import { LabelBadges } from "../../components/LabelBadges"; -import { ActionIcon, Collapse, Group, Stack, Text } from "@mantine/core"; -import { useDisclosure } from "@mantine/hooks"; -import { IconChevronDown, IconChevronUp } from "@tabler/icons-react"; - -type TargetLabelsProps = { - labels: Labels; - discoveredLabels: Labels; -}; - -const TargetLabels: FC = ({ discoveredLabels, labels }) => { - const [showDiscovered, { toggle: toggleDiscovered }] = useDisclosure(false); - - return ( - - - - - - {showDiscovered ? 
( - - ) : ( - - )} - - - - - - Discovered labels: - - - - - ); -}; - -export default TargetLabels; From 5599db20cd1a362d22a950d50691c8768c4228c9 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 3 Sep 2024 16:05:05 +0200 Subject: [PATCH 0408/1397] Change use{Suspense}APIQuery key to an array Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/api/api.ts | 8 ++++---- web/ui/mantine-ui/src/components/ReadinessWrapper.tsx | 6 +++--- web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx | 2 +- web/ui/mantine-ui/src/pages/query/Graph.tsx | 2 +- web/ui/mantine-ui/src/pages/query/TableTab.tsx | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/web/ui/mantine-ui/src/api/api.ts b/web/ui/mantine-ui/src/api/api.ts index baef0b72c5..d7446d6896 100644 --- a/web/ui/mantine-ui/src/api/api.ts +++ b/web/ui/mantine-ui/src/api/api.ts @@ -1,4 +1,4 @@ -import { useQuery, useSuspenseQuery } from "@tanstack/react-query"; +import { QueryKey, useQuery, useSuspenseQuery } from "@tanstack/react-query"; import { useSettings } from "../state/settingsSlice"; export const API_PATH = "api/v1"; @@ -89,7 +89,7 @@ const createQueryFn = }; type QueryOptions = { - key?: string; + key?: QueryKey; path: string; params?: Record; enabled?: boolean; @@ -106,7 +106,7 @@ export const useAPIQuery = ({ const { pathPrefix } = useSettings(); return useQuery>({ - queryKey: key ? [key] : [path, params], + queryKey: key !== undefined ? key : [path, params], retry: false, refetchOnWindowFocus: false, gcTime: 0, @@ -119,7 +119,7 @@ export const useSuspenseAPIQuery = ({ key, path, params }: QueryOptions) => { const { pathPrefix } = useSettings(); return useSuspenseQuery>({ - queryKey: key ? [key] : [path, params], + queryKey: key !== undefined ? 
key : [path, params], retry: false, refetchOnWindowFocus: false, gcTime: 0, diff --git a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx index 800195e98c..52ae0485cb 100644 --- a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx +++ b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx @@ -15,7 +15,7 @@ const ReadinessLoader: FC = () => { // Query readiness status. const { data: ready } = useSuspenseQuery({ - queryKey: [`ready-${queryKey}`], + queryKey: ["ready", queryKey], retry: false, refetchOnWindowFocus: false, gcTime: 0, @@ -46,8 +46,8 @@ const ReadinessLoader: FC = () => { data: { min, max, current }, }, } = useSuspenseAPIQuery({ - path: `/status/walreplay`, - key: `walreplay-${queryKey}`, + path: "/status/walreplay", + key: ["walreplay", queryKey], }); useEffect(() => { diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx index b13a81f61f..b74d745963 100644 --- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx @@ -144,7 +144,7 @@ const ExpressionInput: FC = ({ isFetching: isFormatting, refetch: formatQuery, } = useAPIQuery({ - key: expr, + key: [expr], path: "/format_query", params: { query: expr, diff --git a/web/ui/mantine-ui/src/pages/query/Graph.tsx b/web/ui/mantine-ui/src/pages/query/Graph.tsx index d6e413cfda..3fc9a6c510 100644 --- a/web/ui/mantine-ui/src/pages/query/Graph.tsx +++ b/web/ui/mantine-ui/src/pages/query/Graph.tsx @@ -44,7 +44,7 @@ const Graph: FC = ({ const { data, error, isFetching, isLoading, refetch } = useAPIQuery({ - key: useId(), + key: [useId()], path: "/query_range", params: { query: expr, diff --git a/web/ui/mantine-ui/src/pages/query/TableTab.tsx b/web/ui/mantine-ui/src/pages/query/TableTab.tsx index f65936e6dc..353509d8a9 100644 --- a/web/ui/mantine-ui/src/pages/query/TableTab.tsx +++ 
b/web/ui/mantine-ui/src/pages/query/TableTab.tsx @@ -28,7 +28,7 @@ const TableTab: FC = ({ panelIdx, retriggerIdx }) => { const { endTime, range } = visualizer; const { data, error, isFetching, refetch } = useAPIQuery({ - key: useId(), + key: [useId()], path: "/query", params: { query: expr, From 857e59c8b6720da4c50cc464705eae6e8d165fa9 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 3 Sep 2024 16:05:30 +0200 Subject: [PATCH 0409/1397] Improve EndpointLink styling Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/components/EndpointLink.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/components/EndpointLink.tsx b/web/ui/mantine-ui/src/components/EndpointLink.tsx index 5454031634..c9b6a8989c 100644 --- a/web/ui/mantine-ui/src/components/EndpointLink.tsx +++ b/web/ui/mantine-ui/src/components/EndpointLink.tsx @@ -35,13 +35,13 @@ const EndpointLink: FC = ({ endpoint, globalUrl }) => { {displayLink} {params.length > 0 && ( - + {params.map(([labelName, labelValue]: [string, string]) => { return ( From eb175c88f6655cf1a8a883c40574cec65b0ba781 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 3 Sep 2024 16:07:04 +0200 Subject: [PATCH 0410/1397] Correctly pass custom title into ResettingErrorBoundary Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/components/ErrorBoundary.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/components/ErrorBoundary.tsx b/web/ui/mantine-ui/src/components/ErrorBoundary.tsx index 45ffc1023a..3416623d34 100644 --- a/web/ui/mantine-ui/src/components/ErrorBoundary.tsx +++ b/web/ui/mantine-ui/src/components/ErrorBoundary.tsx @@ -49,7 +49,9 @@ class ErrorBoundary extends Component { const ResettingErrorBoundary = (props: Props) => { const location = useLocation(); return ( - {props.children} + + {props.children} + ); }; From d0ccbd2eb5a505791a5017a30a0f8d1cb3c55fd7 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 3 Sep 2024 16:09:50 
+0200 Subject: [PATCH 0411/1397] Top-align all targets page table cell contents, other styling Signed-off-by: Julius Volz --- .../src/pages/targets/ScrapePoolsList.tsx | 19 +++++++++++++------ .../src/pages/targets/TargetLabels.tsx | 2 +- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index 1e399b8a18..290543fc5b 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -325,7 +325,7 @@ const ScrapePoolList: FC = ({ : undefined, }} > - + {/* TODO: Process target URL like in old UI */} = ({ /> - + - - + + = ({ withArrow > = ({ - + {target.health} @@ -391,7 +398,7 @@ const ScrapePoolList: FC = ({ {target.lastError && ( - + = ({ discoveredLabels, labels }) => { const [showDiscovered, { toggle: toggleDiscovered }] = useDisclosure(false); return ( - + From 2304be9dd5cbba1418857d3f5d3709192d25937b Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 3 Sep 2024 22:09:07 +0200 Subject: [PATCH 0412/1397] promql: Improve the rate extrapolation explanation Signed-off-by: beorn7 --- promql/functions.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 9fa7fbe190..8271ef04ea 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -130,10 +130,18 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod sampledInterval := float64(lastT-firstT) / 1000 averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) - // If the first/last samples are close to the boundaries of the range, - // extrapolate the result. This is as we expect that another sample - // will exist given the spacing between samples we've seen thus far, - // with an allowance for noise. 
+ // If samples are close enough to the (lower or upper) boundary of the + // range, we extrapolate the rate all the way to the boundary in + // question. "Close enough" is defined as "up to 10% more than the + // average duration between samples within the range", see + // extrapolationThreshold below. Essentially, we are assuming a more or + // less regular spacing between samples, and if we don't see a sample + // where we would expect one, we assume the series does not cover the + // whole range, but starts and/or ends within the range. We still + // extrapolate the rate in this case, but not all the way to the + // boundary, but only by half of the average duration between samples + // (which is our guess for where the series actually starts or ends). + extrapolationThreshold := averageDurationBetweenSamples * 1.1 extrapolateToInterval := sampledInterval From e67358d203864018ecbbe8c74c1cb3af3be4b2b4 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 15:39:05 +1000 Subject: [PATCH 0413/1397] histogram: include counter reset hint in test expression output Signed-off-by: Charles Korn --- model/histogram/float_histogram.go | 11 +++++++++++ promql/parser/parse_test.go | 16 ++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 2a37ea66d4..1777afdbf1 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -230,6 +230,17 @@ func (h *FloatHistogram) TestExpression() string { res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) } + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. 
+ case CounterReset: + res = append(res, fmt.Sprintf("counter_reset_hint:reset")) + case NotCounterReset: + res = append(res, fmt.Sprintf("counter_reset_hint:not_reset")) + case GaugeType: + res = append(res, fmt.Sprintf("counter_reset_hint:gauge")) + } + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 37748323ce..40e6809183 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -4385,6 +4385,22 @@ func TestHistogramTestExpression(t *testing.T) { }, expected: `{{offset:-3 buckets:[5.1 0 0 0 0 10 7] n_offset:-1 n_buckets:[4.1 5 0 0 7 8 9]}}`, }, + { + name: "known counter reset hint", + input: histogram.FloatHistogram{ + Schema: 1, + Sum: -0.3, + Count: 3.1, + ZeroCount: 7.1, + ZeroThreshold: 0.05, + PositiveBuckets: []float64{5.1, 10, 7}, + PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + NegativeBuckets: []float64{4.1, 5}, + NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + CounterResetHint: histogram.CounterReset, + }, + expected: `{{schema:1 count:3.1 sum:-0.3 z_bucket:7.1 z_bucket_w:0.05 counter_reset_hint:reset offset:-3 buckets:[5.1 10 7] n_offset:-5 n_buckets:[4.1 5]}}`, + }, } { t.Run(test.name, func(t *testing.T) { expression := test.input.TestExpression() From 90dc1b45dbb448f6ce0ff9349dcd06e76db4f525 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 15:47:07 +1000 Subject: [PATCH 0414/1397] promqltest: use test expression format for histograms in assertion failure messages Signed-off-by: Charles Korn --- promql/promqltest/test.go | 10 ++++++++-- promql/promqltest/test_test.go | 6 +++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 065e52e33f..bab8388622 100644 --- a/promql/promqltest/test.go +++ 
b/promql/promqltest/test.go @@ -779,7 +779,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { } if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) { - return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s)) + return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H.TestExpression(), actual.H.TestExpression(), formatSeriesResult(s)) } } } @@ -995,7 +995,13 @@ func formatSeriesResult(s promql.Series) string { histogramPlural = "" } - return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms) + histograms := make([]string, 0, len(s.Histograms)) + + for _, p := range s.Histograms { + histograms = append(histograms, fmt.Sprintf("%v @[%v]", p.H.TestExpression(), p.T)) + } + + return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, histograms) } // HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil. 
diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 49b43eb126..bd965b00b5 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -381,7 +381,7 @@ load 5m eval range from 0 to 10m step 5m testmetric testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:7 count:1 buckets:[1] offset:1}} {{schema:-1 sum:8 count:1 buckets:[1] offset:1}} `, - expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {count:1, sum:7, (1,4]:1}, but got {count:1, sum:5, (1,4]:1} (result has 0 float points [] and 3 histogram points [{count:1, sum:4, (1,4]:1} @[0] {count:1, sum:5, (1,4]:1} @[300000] {count:1, sum:6, (1,4]:1} @[600000]])`, + expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {{schema:-1 count:1 sum:7 offset:1 buckets:[1]}}, but got {{schema:-1 count:1 sum:5 counter_reset_hint:not_reset offset:1 buckets:[1]}} (result has 0 float points [] and 3 histogram points [{{schema:-1 count:1 sum:4 offset:1 buckets:[1]}} @[0] {{schema:-1 count:1 sum:5 counter_reset_hint:not_reset offset:1 buckets:[1]}} @[300000] {{schema:-1 count:1 sum:6 counter_reset_hint:not_reset offset:1 buckets:[1]}} @[600000]])`, }, "range query with too many points for query time range": { input: testData + ` @@ -532,7 +532,7 @@ load 5m eval range from 0 to 5m step 5m testmetric testmetric 2 3 `, - expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{count:0, sum:0} @[0] {count:0, sum:0} @[300000]]`, + expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{{}} @[0] {{counter_reset_hint:not_reset}} @[300000]]`, }, "range query with 
expected mixed results": { input: ` @@ -552,7 +552,7 @@ load 5m eval range from 0 to 5m step 5m testmetric testmetric {{}} 3 `, - expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`, + expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{{}} @[300000]])`, }, "instant query with expected scalar result": { input: ` From 9b451abec735cbab73627f2c56689e8a8f4faa41 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 16:08:05 +1000 Subject: [PATCH 0415/1397] Make positive and negative bucket counts different in existing test cases Signed-off-by: Charles Korn --- promql/promqltest/testdata/native_histograms.test | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 71e102dcee..4f60d2cbd8 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -720,27 +720,27 @@ eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(hist # Apply multiplication and division operator to histogram. 
load 10m - histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1 + histogram_mul_div {{schema:0 count:30 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[6 6 6]}}x1 float_series_3 3+0x1 float_series_0 0+0x1 eval instant at 10m histogram_mul_div*3 - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m 3*histogram_mul_div - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*float_series_3 - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m float_series_3*histogram_mul_div - {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div/3 - {} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}} + {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div/float_series_3 - {} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}} + {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div*0 {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} From 113de6301c41c64819e8eeddc8c1f862d720b7af Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 16:20:28 +1000 Subject: [PATCH 
0416/1397] Add failing test cases for unary negation and multiplication and division with negative scalars Signed-off-by: Charles Korn --- promql/promqltest/testdata/native_histograms.test | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 4f60d2cbd8..ee2ae7759c 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -727,6 +727,15 @@ load 10m eval instant at 10m histogram_mul_div*3 {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} +eval instant at 10m histogram_mul_div*-1 + {} {{schema:0 count:30 sum:-33 z_bucket:3 z_bucket_w:0.001 buckets:[6 6 6] n_buckets:[3 3 3]}} + +eval instant at 10m -histogram_mul_div + {} {{schema:0 count:30 sum:-33 z_bucket:3 z_bucket_w:0.001 buckets:[6 6 6] n_buckets:[3 3 3]}} + +eval instant at 10m histogram_mul_div*-3 + {} {{schema:0 count:90 sum:-99 z_bucket:9 z_bucket_w:0.001 buckets:[18 18 18] n_buckets:[9 9 9]}} + eval instant at 10m 3*histogram_mul_div {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} @@ -739,6 +748,9 @@ eval instant at 10m float_series_3*histogram_mul_div eval instant at 10m histogram_mul_div/3 {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} +eval instant at 10m histogram_mul_div/-3 + {} {{schema:0 count:10 sum:-11 z_bucket:1 z_bucket_w:0.001 buckets:[2 2 2] n_buckets:[1 1 1]}} + eval instant at 10m histogram_mul_div/float_series_3 {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} From 4da551578c691591d54bddf38fd8b1620c5faa73 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 16:33:18 +1000 Subject: [PATCH 0417/1397] Fix test broken by inclusion of `counter_reset_hint` Signed-off-by: Charles Korn --- cmd/promtool/testdata/unittest.yml 
| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index ff511729ba..d6224d785f 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -69,13 +69,13 @@ tests: eval_time: 2m exp_samples: - labels: "test_histogram_repeat" - histogram: "{{count:2 sum:3 buckets:[2]}}" + histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}" - expr: test_histogram_increase eval_time: 2m exp_samples: - labels: "test_histogram_increase" - histogram: "{{count:4 sum:5.6 buckets:[4]}}" + histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}" # Ensure a value is stale as soon as it is marked as such. - expr: test_stale From f2064c7987d34d44c368b48968f845842dfd973d Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 4 Sep 2024 20:07:16 +1000 Subject: [PATCH 0418/1397] NH: Do not re-use spans between histograms (#14771) promql, tsdb (histograms): Do not re-use spans between histograms When multiple points exist with the same native histogram schemas they share their spans. This causes a problem when a native histogram (NH) schema is modified (for example, during a Sum) then the other NH's with the same spans are also modified. As such, we should create a new Span for each NH. This will ensure NH's interfaces are safe to use without considering the effect on other histograms. At the moment this doesn't present itself as a problem because in all aggregations and functions operating on native histograms they are copied by the promql query engine first. 
Signed-off-by: Joshua Hesketh --------- Signed-off-by: Joshua Hesketh --- tsdb/chunkenc/float_histogram.go | 17 ++++- tsdb/chunkenc/float_histogram_test.go | 51 +++++++++++++ tsdb/chunkenc/histogram.go | 21 +++++- tsdb/chunkenc/histogram_test.go | 102 ++++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 2 deletions(-) diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index a5f123bc93..f18eb77dad 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -974,6 +974,7 @@ func (it *floatHistogramIterator) Reset(b []byte) { if it.atFloatHistogramCalled { it.atFloatHistogramCalled = false it.pBuckets, it.nBuckets = nil, nil + it.pSpans, it.nSpans = nil, nil } else { it.pBuckets, it.nBuckets = it.pBuckets[:0], it.nBuckets[:0] } @@ -1069,7 +1070,7 @@ func (it *floatHistogramIterator) Next() ValueType { // The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code, // so we don't need a separate single delta logic for the 2nd sample. - // Recycle bucket slices that have not been returned yet. Otherwise, copy them. + // Recycle bucket and span slices that have not been returned yet. Otherwise, copy them. // We can always recycle the slices for leading and trailing bits as they are // never returned to the caller. 
if it.atFloatHistogramCalled { @@ -1088,6 +1089,20 @@ func (it *floatHistogramIterator) Next() ValueType { } else { it.nBuckets = nil } + if len(it.pSpans) > 0 { + newSpans := make([]histogram.Span, len(it.pSpans)) + copy(newSpans, it.pSpans) + it.pSpans = newSpans + } else { + it.pSpans = nil + } + if len(it.nSpans) > 0 { + newSpans := make([]histogram.Span, len(it.nSpans)) + copy(newSpans, it.nSpans) + it.nSpans = newSpans + } else { + it.nSpans = nil + } } tDod, err := readVarbitInt(&it.br) diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 689696f5ae..6092c0f636 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -1306,3 +1306,54 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) { require.EqualError(t, err, "float histogram counter reset") }) } + +func TestFloatHistogramUniqueSpansAfterNext(t *testing.T) { + // Create two histograms with the same schema and spans. + h1 := &histogram.FloatHistogram{ + Schema: 1, + ZeroThreshold: 1e-100, + Count: 10, + ZeroCount: 2, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []float64{2}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewFloatHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendFloatHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendFloatHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValFloatHistogram, it.Next()) + _, rh1 := it.AtFloatHistogram(nil) + + // Advance to the second histogram and retrieve it. 
+ require.Equal(t, ValFloatHistogram, it.Next()) + _, rh2 := it.AtFloatHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + + // Check that the spans for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") +} diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index fafae48d3c..f8796d64ec 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -1073,6 +1073,7 @@ func (it *histogramIterator) Reset(b []byte) { if it.atHistogramCalled { it.atHistogramCalled = false it.pBuckets, it.nBuckets = nil, nil + it.pSpans, it.nSpans = nil, nil } else { it.pBuckets = it.pBuckets[:0] it.nBuckets = it.nBuckets[:0] @@ -1185,8 +1186,25 @@ func (it *histogramIterator) Next() ValueType { // The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code, // so we don't need a separate single delta logic for the 2nd sample. - // Recycle bucket slices that have not been returned yet. Otherwise, + // Recycle bucket and span slices that have not been returned yet. Otherwise, copy them. // copy them. 
+ if it.atFloatHistogramCalled || it.atHistogramCalled { + if len(it.pSpans) > 0 { + newSpans := make([]histogram.Span, len(it.pSpans)) + copy(newSpans, it.pSpans) + it.pSpans = newSpans + } else { + it.pSpans = nil + } + if len(it.nSpans) > 0 { + newSpans := make([]histogram.Span, len(it.nSpans)) + copy(newSpans, it.nSpans) + it.nSpans = newSpans + } else { + it.nSpans = nil + } + } + if it.atHistogramCalled { it.atHistogramCalled = false if len(it.pBuckets) > 0 { @@ -1204,6 +1222,7 @@ func (it *histogramIterator) Next() ValueType { it.nBuckets = nil } } + // FloatBuckets are set from scratch, so simply create empty ones. if it.atFloatHistogramCalled { it.atFloatHistogramCalled = false diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 59187ed17e..29b77b1583 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -1487,6 +1487,108 @@ func TestHistogramAppendOnlyErrors(t *testing.T) { }) } +func TestHistogramUniqueSpansAfterNext(t *testing.T) { + // Create two histograms with the same schema and spans. + h1 := &histogram.Histogram{ + Schema: 1, + ZeroThreshold: 1e-100, + Count: 10, + ZeroCount: 2, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, 3, 4}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{2}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, rh1 := it.AtHistogram(nil) + + // Advance to the second histogram and retrieve it. 
+ require.Equal(t, ValHistogram, it.Next()) + _, rh2 := it.AtHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + + // Check that the spans for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") +} + +func TestHistogramUniqueSpansAfterNextWithAtFloatHistogram(t *testing.T) { + // Create two histograms with the same schema and spans. + h1 := &histogram.Histogram{ + Schema: 1, + ZeroThreshold: 1e-100, + Count: 10, + ZeroCount: 2, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, 3, 4}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{2}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, rh1 := it.AtFloatHistogram(nil) + + // Advance to the second histogram and retrieve it. 
+ require.Equal(t, ValHistogram, it.Next()) + _, rh2 := it.AtFloatHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + + // Check that the spans for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") +} + func BenchmarkAppendable(b *testing.B) { // Create a histogram with a bunch of spans and buckets. const ( From f5971bf292aa40547e0ba1a718212493a76382b4 Mon Sep 17 00:00:00 2001 From: Antoine Pultier <45740+fungiboletus@users.noreply.github.com> Date: Wed, 4 Sep 2024 14:57:30 +0200 Subject: [PATCH 0419/1397] tsdb documenation: More details about chunks Signed-off-by: Antoine Pultier <45740+fungiboletus@users.noreply.github.com> --- tsdb/docs/format/chunks.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tsdb/docs/format/chunks.md b/tsdb/docs/format/chunks.md index 8318e0a540..8a35721358 100644 --- a/tsdb/docs/format/chunks.md +++ b/tsdb/docs/format/chunks.md @@ -29,15 +29,16 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes). 
# Chunk ``` -┌───────────────┬───────────────────┬──────────────┬────────────────┐ -│ len │ encoding <1 byte> │ data │ CRC32 <4 byte> │ -└───────────────┴───────────────────┴──────────────┴────────────────┘ +┌───────────────┬───────────────────┬──────────────┬─────────────────┐ +│ len │ encoding <1 byte> │ data │ CRC32C <4 byte> │ +└───────────────┴───────────────────┴──────────────┴─────────────────┘ ``` Notes: -* `` has 1 to 10 bytes. -* `encoding`: Currently either `XOR` or `histogram`. +* `` has 1 to 10 bytes, from [`encoding/binary`](https://go.dev/src/encoding/binary/varint.go). +* `encoding`: Currently either `XOR` (1), `histogram` (2), or `float histogram` (3). * `data`: See below for each encoding. +* `CRC32C`: CRC32 Castagnoli of `encoding` and `data`, serialised as an unsigned 32 bits big endian. ## XOR chunk data From f3f324be89125ca133cbfe84f91818a0d86c21a9 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:18:41 +0200 Subject: [PATCH 0420/1397] Don't update pathPrefix in settings on every App render Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 2ca61761fa..3197cc2c61 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -52,7 +52,7 @@ import TSDBStatusPage from "./pages/TSDBStatusPage"; import FlagsPage from "./pages/FlagsPage"; import ConfigPage from "./pages/ConfigPage"; import AgentPage from "./pages/AgentPage"; -import { Suspense } from "react"; +import { Suspense, useEffect } from "react"; import ErrorBoundary from "./components/ErrorBoundary"; import { ThemeSelector } from "./components/ThemeSelector"; import { Notifications } from "@mantine/notifications"; @@ -192,7 +192,10 @@ function App() { const pathPrefix = getPathPrefix(window.location.pathname); const dispatch = useAppDispatch(); - dispatch(updateSettings({ pathPrefix })); + + useEffect(() 
=> { + dispatch(updateSettings({ pathPrefix })); + }, [pathPrefix]); const { agentMode } = useSettings(); From a99c01b53f085c7f7f91b24bd2371a6906aed8b2 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:25:34 +0200 Subject: [PATCH 0421/1397] Implement query history Signed-off-by: Julius Volz --- .../src/pages/query/ExpressionInput.tsx | 18 ++++++++----- .../mantine-ui/src/pages/query/QueryPanel.tsx | 5 ++++ .../src/state/localStorageMiddleware.ts | 18 +++++++++++-- web/ui/mantine-ui/src/state/queryPageSlice.ts | 25 +++++++++++++++++-- 4 files changed, 56 insertions(+), 10 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx index b74d745963..7a6b1f8436 100644 --- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx @@ -68,6 +68,7 @@ import { notifications } from "@mantine/notifications"; import { useSettings } from "../../state/settingsSlice"; import MetricsExplorer from "./MetricsExplorer/MetricsExplorer"; import ErrorBoundary from "../../components/ErrorBoundary"; +import { useAppSelector } from "../../state/hooks"; const promqlExtension = new PromQLExtension(); @@ -127,11 +128,13 @@ const ExpressionInput: FC = ({ removePanel, }) => { const theme = useComputedColorScheme(); + const { queryHistory } = useAppSelector((state) => state.queryPage); const { pathPrefix, enableAutocomplete, enableSyntaxHighlighting, enableLinter, + enableQueryHistory, } = useSettings(); const [expr, setExpr] = useState(initialExpr); useEffect(() => { @@ -175,10 +178,6 @@ const ExpressionInput: FC = ({ const [showMetricsExplorer, setShowMetricsExplorer] = useState(false); - // TODO: Implement query history. - // This is just a placeholder until query history is implemented, so disable the linter warning. - const queryHistory = useMemo(() => [], []); - // (Re)initialize editor based on settings / setting changes. 
useEffect(() => { // Build the dynamic part of the config. @@ -193,10 +192,17 @@ const ExpressionInput: FC = ({ cache: { initialMetricList: metricNames }, }, }), - queryHistory + enableQueryHistory ? queryHistory : [] ), }); - }, [pathPrefix, metricNames, enableAutocomplete, enableLinter, queryHistory]); // TODO: Make this depend on external settings changes, maybe use dynamic config compartment again. + }, [ + pathPrefix, + metricNames, + enableAutocomplete, + enableLinter, + enableQueryHistory, + queryHistory, + ]); // TODO: Make this depend on external settings changes, maybe use dynamic config compartment again. return ( diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx index 4918e87fea..3b693e19b3 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx @@ -17,6 +17,7 @@ import { import { FC, useCallback, useState } from "react"; import { useAppDispatch, useAppSelector } from "../../state/hooks"; import { + addQueryToHistory, GraphDisplayMode, GraphResolution, removePanel, @@ -73,6 +74,10 @@ const QueryPanel: FC = ({ idx, metricNames }) => { executeQuery={(expr: string) => { setRetriggerIdx((idx) => idx + 1); dispatch(setExpr({ idx, expr })); + + if (!metricNames.includes(expr) && expr.trim() !== "") { + dispatch(addQueryToHistory(expr)); + } }} removePanel={() => { dispatch(removePanel(idx)); diff --git a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts index 64ddfafe44..93eae950ea 100644 --- a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts +++ b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts @@ -6,9 +6,13 @@ import { } from "./targetsPageSlice"; import { localStorageKeyCollapsedPools as localStorageKeyServiceDiscoveryPageCollapsedPools, - setCollapsedPools as ServiceDiscoveryPageSetCollapsedPools, + setCollapsedPools as serviceDiscoveryPageSetCollapsedPools, } 
from "./serviceDiscoveryPageSlice"; import { updateSettings } from "./settingsSlice"; +import { + addQueryToHistory, + localStorageKeyQueryHistory, +} from "./queryPageSlice"; const persistToLocalStorage = (key: string, value: T) => { localStorage.setItem(key, JSON.stringify(value)); @@ -29,7 +33,7 @@ startAppListening({ }); startAppListening({ - actionCreator: ServiceDiscoveryPageSetCollapsedPools, + actionCreator: serviceDiscoveryPageSetCollapsedPools, effect: ({ payload }) => { persistToLocalStorage( localStorageKeyServiceDiscoveryPageCollapsedPools, @@ -38,6 +42,16 @@ startAppListening({ }, }); +startAppListening({ + actionCreator: addQueryToHistory, + effect: (_, { getState }) => { + persistToLocalStorage( + localStorageKeyQueryHistory, + getState().queryPage.queryHistory + ); + }, +}); + startAppListening({ actionCreator: updateSettings, effect: ({ payload }) => { diff --git a/web/ui/mantine-ui/src/state/queryPageSlice.ts b/web/ui/mantine-ui/src/state/queryPageSlice.ts index 0de75807f8..4f58d549d3 100644 --- a/web/ui/mantine-ui/src/state/queryPageSlice.ts +++ b/web/ui/mantine-ui/src/state/queryPageSlice.ts @@ -1,6 +1,9 @@ import { randomId } from "@mantine/hooks"; import { PayloadAction, createSlice } from "@reduxjs/toolkit"; import { encodePanelOptionsToURLParams } from "../pages/query/urlStateEncoding"; +import { initializeFromLocalStorage } from "./initializeFromLocalStorage"; + +export const localStorageKeyQueryHistory = "queryPage.queryHistory"; export enum GraphDisplayMode { Lines = "lines", @@ -68,6 +71,7 @@ export type Panel = { interface QueryPageState { panels: Panel[]; + queryHistory: string[]; } export const newDefaultPanel = (): Panel => ({ @@ -87,6 +91,10 @@ export const newDefaultPanel = (): Panel => ({ const initialState: QueryPageState = { panels: [newDefaultPanel()], + queryHistory: initializeFromLocalStorage( + localStorageKeyQueryHistory, + [] + ), }; const updateURL = (panels: Panel[]) => { @@ -116,6 +124,12 @@ export const queryPageSlice 
= createSlice({ state.panels[payload.idx].expr = payload.expr; updateURL(state.panels); }, + addQueryToHistory: (state, { payload: query }: PayloadAction) => { + state.queryHistory = [ + query, + ...state.queryHistory.filter((q) => q !== query), + ].slice(0, 50); + }, setVisualizer: ( state, { payload }: PayloadAction<{ idx: number; visualizer: Visualizer }> @@ -126,7 +140,14 @@ export const queryPageSlice = createSlice({ }, }); -export const { setPanels, addPanel, removePanel, setExpr, setVisualizer } = - queryPageSlice.actions; +export const { + setPanels, + addPanel, + removePanel, + setExpr, + addQueryToHistory, + setShowTree, + setVisualizer, +} = queryPageSlice.actions; export default queryPageSlice.reducer; From fef4c15283651319983c2801c9ca0fc6d4dd22d8 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:34:35 +0200 Subject: [PATCH 0422/1397] Remove commented-out line Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/components/SettingsMenu.tsx | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index 7552693d78..0d004bd4b2 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -18,12 +18,7 @@ const SettingsMenu: FC = () => { return ( - + From e5e212dad2cbf2820c475106c04c18e13838bddc Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:35:17 +0200 Subject: [PATCH 0423/1397] Remove wrong query key for format_query Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx | 1 - 1 file changed, 1 deletion(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx index 7a6b1f8436..fca715dca9 100644 --- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx @@ -147,7 +147,6 @@ const 
ExpressionInput: FC = ({ isFetching: isFormatting, refetch: formatQuery, } = useAPIQuery({ - key: [expr], path: "/format_query", params: { query: expr, From c0ba4ee2bb865e179eee00d7179bbb1d23d49792 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:42:15 +0200 Subject: [PATCH 0424/1397] Add beginnings of a PromLens-style tree view Signed-off-by: Julius Volz --- web/api/v1/api.go | 12 + web/api/v1/parse_expr.go | 153 ++++++++ web/ui/mantine-ui/package.json | 1 + .../src/pages/query/ExpressionInput.tsx | 13 + .../mantine-ui/src/pages/query/QueryPanel.tsx | 56 ++- .../mantine-ui/src/pages/query/TableTab.tsx | 5 +- .../src/pages/query/TreeNode.module.css | 36 ++ .../mantine-ui/src/pages/query/TreeNode.tsx | 354 ++++++++++++++++++ .../mantine-ui/src/pages/query/TreeView.tsx | 45 +++ .../src/pages/query/urlStateEncoding.ts | 4 + web/ui/mantine-ui/src/promql.css | 2 +- web/ui/mantine-ui/src/state/queryPageSlice.ts | 11 +- web/ui/package-lock.json | 1 + 13 files changed, 684 insertions(+), 9 deletions(-) create mode 100644 web/api/v1/parse_expr.go create mode 100644 web/ui/mantine-ui/src/pages/query/TreeNode.module.css create mode 100644 web/ui/mantine-ui/src/pages/query/TreeNode.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/TreeView.tsx diff --git a/web/api/v1/api.go b/web/api/v1/api.go index d58be211f2..0ec8467faa 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -366,6 +366,9 @@ func (api *API) Register(r *route.Router) { r.Get("/format_query", wrapAgent(api.formatQuery)) r.Post("/format_query", wrapAgent(api.formatQuery)) + r.Get("/parse_query", wrapAgent(api.parseQuery)) + r.Post("/parse_query", wrapAgent(api.parseQuery)) + r.Get("/labels", wrapAgent(api.labelNames)) r.Post("/labels", wrapAgent(api.labelNames)) r.Get("/label/:name/values", wrapAgent(api.labelValues)) @@ -485,6 +488,15 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } +func (api *API) 
parseQuery(r *http.Request) apiFuncResult { + expr, err := parser.ParseExpr(r.FormValue("query")) + if err != nil { + return invalidParamError(err, "query") + } + + return apiFuncResult{data: translateAST(expr), err: nil, warnings: nil, finalizer: nil} +} + func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { var duration time.Duration diff --git a/web/api/v1/parse_expr.go b/web/api/v1/parse_expr.go new file mode 100644 index 0000000000..ed14b6829a --- /dev/null +++ b/web/api/v1/parse_expr.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +import ( + "strconv" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +func getStartOrEnd(startOrEnd parser.ItemType) interface{} { + if startOrEnd == 0 { + return nil + } + + return startOrEnd.String() +} + +func translateAST(node parser.Expr) interface{} { + if node == nil { + return nil + } + + switch n := node.(type) { + case *parser.AggregateExpr: + return map[string]interface{}{ + "type": "aggregation", + "op": n.Op.String(), + "expr": translateAST(n.Expr), + "param": translateAST(n.Param), + "grouping": sanitizeList(n.Grouping), + "without": n.Without, + } + case *parser.BinaryExpr: + var matching interface{} + if m := n.VectorMatching; m != nil { + matching = map[string]interface{}{ + "card": m.Card.String(), + "labels": sanitizeList(m.MatchingLabels), + "on": m.On, + "include": sanitizeList(m.Include), + } + } + + return map[string]interface{}{ + "type": "binaryExpr", + "op": n.Op.String(), + "lhs": translateAST(n.LHS), + "rhs": translateAST(n.RHS), + "matching": matching, + "bool": n.ReturnBool, + } + case *parser.Call: + args := []interface{}{} + for _, arg := range n.Args { + args = append(args, translateAST(arg)) + } + + return map[string]interface{}{ + "type": "call", + "func": map[string]interface{}{ + "name": n.Func.Name, + "argTypes": n.Func.ArgTypes, + "variadic": n.Func.Variadic, + "returnType": n.Func.ReturnType, + }, + "args": args, + } + case *parser.MatrixSelector: + vs := n.VectorSelector.(*parser.VectorSelector) + return map[string]interface{}{ + "type": "matrixSelector", + "name": vs.Name, + "range": n.Range.Milliseconds(), + "offset": vs.OriginalOffset.Milliseconds(), + "matchers": translateMatchers(vs.LabelMatchers), + "timestamp": vs.Timestamp, + "startOrEnd": getStartOrEnd(vs.StartOrEnd), + } + case *parser.SubqueryExpr: + return map[string]interface{}{ + "type": "subquery", + "expr": translateAST(n.Expr), + "range": n.Range.Milliseconds(), + "offset": 
n.OriginalOffset.Milliseconds(), + "step": n.Step.Milliseconds(), + "timestamp": n.Timestamp, + "startOrEnd": getStartOrEnd(n.StartOrEnd), + } + case *parser.NumberLiteral: + return map[string]string{ + "type": "numberLiteral", + "val": strconv.FormatFloat(n.Val, 'f', -1, 64), + } + case *parser.ParenExpr: + return map[string]interface{}{ + "type": "parenExpr", + "expr": translateAST(n.Expr), + } + case *parser.StringLiteral: + return map[string]interface{}{ + "type": "stringLiteral", + "val": n.Val, + } + case *parser.UnaryExpr: + return map[string]interface{}{ + "type": "unaryExpr", + "op": n.Op.String(), + "expr": translateAST(n.Expr), + } + case *parser.VectorSelector: + return map[string]interface{}{ + "type": "vectorSelector", + "name": n.Name, + "offset": n.OriginalOffset.Milliseconds(), + "matchers": translateMatchers(n.LabelMatchers), + "timestamp": n.Timestamp, + "startOrEnd": getStartOrEnd(n.StartOrEnd), + } + } + panic("unsupported node type") +} + +func sanitizeList(l []string) []string { + if l == nil { + return []string{} + } + return l +} + +func translateMatchers(in []*labels.Matcher) interface{} { + out := []map[string]interface{}{} + for _, m := range in { + out = append(out, map[string]interface{}{ + "name": m.Name, + "value": m.Value, + "type": m.Type.String(), + }) + } + return out +} diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index c4afcdc927..bb7e5534ac 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -35,6 +35,7 @@ "@types/lodash": "^4.17.7", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.21.22", + "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", "react": "^18.3.1", diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx index fca715dca9..75a578f918 100644 --- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx @@ 
-58,6 +58,7 @@ import { highlightSelectionMatches } from "@codemirror/search"; import { lintKeymap } from "@codemirror/lint"; import { IconAlignJustified, + IconBinaryTree, IconDotsVertical, IconSearch, IconTerminal, @@ -118,6 +119,8 @@ interface ExpressionInputProps { initialExpr: string; metricNames: string[]; executeQuery: (expr: string) => void; + treeShown: boolean; + setShowTree: (showTree: boolean) => void; removePanel: () => void; } @@ -126,6 +129,8 @@ const ExpressionInput: FC = ({ metricNames, executeQuery, removePanel, + treeShown, + setShowTree, }) => { const theme = useComputedColorScheme(); const { queryHistory } = useAppSelector((state) => state.queryPage); @@ -245,6 +250,14 @@ const ExpressionInput: FC = ({ > Format expression + + } + onClick={() => setShowTree(!treeShown)} + > + {treeShown ? "Hide" : "Show"} tree view + = ({ idx, metricNames }) => { const panel = useAppSelector((state) => state.queryPage.panels[idx]); const dispatch = useAppDispatch(); + const [selectedNode, setSelectedNode] = useState<{ + id: string; + node: ASTNode; + } | null>(null); + + const expr = useMemo( + () => + selectedNode !== null ? 
serializeNode(selectedNode.node) : panel.expr, + [selectedNode, panel.expr] + ); + const onSelectRange = useCallback( (start: number, end: number) => dispatch( @@ -69,6 +88,8 @@ const QueryPanel: FC = ({ idx, metricNames }) => { return ( { @@ -79,10 +100,37 @@ const QueryPanel: FC = ({ idx, metricNames }) => { dispatch(addQueryToHistory(expr)); } }} + treeShown={panel.showTree} + setShowTree={(showTree: boolean) => { + dispatch(setShowTree({ idx, showTree })); + if (!showTree) { + setSelectedNode(null); + } + }} removePanel={() => { dispatch(removePanel(idx)); }} /> + {panel.expr.trim() !== "" && panel.showTree && ( + + + {Array.from(Array(20), (_, i) => ( + + ))} + + } + > + + + + )} @@ -107,7 +155,7 @@ const QueryPanel: FC = ({ idx, metricNames }) => { - + = ({ idx, metricNames }) => { = ({ panelIdx, retriggerIdx }) => { +const TableTab: FC = ({ panelIdx, retriggerIdx, expr }) => { const [responseTime, setResponseTime] = useState(0); const [limitResults, setLimitResults] = useState(true); - const { expr, visualizer } = useAppSelector( + const { visualizer } = useAppSelector( (state) => state.queryPage.panels[panelIdx] ); const dispatch = useAppDispatch(); diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.module.css b/web/ui/mantine-ui/src/pages/query/TreeNode.module.css new file mode 100644 index 0000000000..da3b8436b4 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.module.css @@ -0,0 +1,36 @@ +.nodeText { + cursor: pointer; + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-dark-5) + ); + border: 2px solid transparent; +} + +.nodeText.nodeTextSelected, +.nodeText.nodeTextSelected:hover { + background-color: light-dark( + var(--mantine-color-gray-3), + var(--mantine-color-dark-3) + ); + border: 2px solid + light-dark(var(--mantine-color-gray-5), var(--mantine-color-dark-2)); +} + +.nodeText:hover { + background-color: light-dark( + var(--mantine-color-gray-2), + var(--mantine-color-dark-4) + ); +} + 
+.nodeText.nodeTextError { + background-color: light-dark( + var(--mantine-color-red-1), + darken(var(--mantine-color-red-5), 70%) + ); +} + +.errorText { + color: light-dark(var(--mantine-color-red-9), var(--mantine-color-red-3)); +} diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx new file mode 100644 index 0000000000..087f58a917 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -0,0 +1,354 @@ +import { + FC, + useEffect, + useLayoutEffect, + useMemo, + useRef, + useState, +} from "react"; +import ASTNode, { nodeType } from "../../promql/ast"; +import { getNodeChildren } from "../../promql/utils"; +import { formatNode } from "../../promql/format"; +import { Box, CSSProperties, Group, Loader, Text } from "@mantine/core"; +import { useAPIQuery } from "../../api/api"; +import { + InstantQueryResult, + InstantSample, + RangeSamples, +} from "../../api/responseTypes/query"; +import serializeNode from "../../promql/serialize"; +import { + IconAlertTriangle, + IconPoint, + IconPointFilled, +} from "@tabler/icons-react"; +import classes from "./TreeNode.module.css"; +import clsx from "clsx"; +import { useId } from "@mantine/hooks"; + +const nodeIndent = 20; + +type NodeState = "waiting" | "running" | "error" | "success"; + +const mergeChildStates = (states: NodeState[]): NodeState => { + if (states.includes("error")) { + return "error"; + } + if (states.includes("waiting")) { + return "waiting"; + } + if (states.includes("running")) { + return "running"; + } + + return "success"; +}; + +const TreeNode: FC<{ + node: ASTNode; + selectedNode: { id: string; node: ASTNode } | null; + setSelectedNode: (Node: { id: string; node: ASTNode } | null) => void; + parentRef?: React.RefObject; + reportNodeState?: (state: NodeState) => void; + reverse: boolean; +}> = ({ + node, + selectedNode, + setSelectedNode, + parentRef, + reportNodeState, + reverse, +}) => { + const nodeID = useId(); + const nodeRef = 
useRef(null); + const [connectorStyle, setConnectorStyle] = useState({ + borderColor: + "light-dark(var(--mantine-color-gray-4), var(--mantine-color-dark-3))", + borderLeftStyle: "solid", + borderLeftWidth: 2, + width: nodeIndent - 7, + left: -nodeIndent + 7, + }); + const [responseTime, setResponseTime] = useState(0); + const [resultStats, setResultStats] = useState<{ + numSeries: number; + labelCardinalities: Record; + labelExamples: Record; + }>({ + numSeries: 0, + labelCardinalities: {}, + labelExamples: {}, + }); + + const children = getNodeChildren(node); + + const [childStates, setChildStates] = useState( + children.map(() => "waiting") + ); + const mergedChildState = useMemo( + () => mergeChildStates(childStates), + [childStates] + ); + + const { data, error, isFetching, refetch } = useAPIQuery({ + key: [useId()], + path: "/query", + params: { + query: serializeNode(node), + }, + recordResponseTime: setResponseTime, + enabled: mergedChildState === "success", + }); + + useEffect(() => { + if (mergedChildState === "error") { + reportNodeState && reportNodeState("error"); + } + }, [mergedChildState, reportNodeState]); + + useEffect(() => { + if (error) { + reportNodeState && reportNodeState("error"); + } + }, [error]); + + useEffect(() => { + if (isFetching) { + reportNodeState && reportNodeState("running"); + } + }, [isFetching]); + + // Update the size and position of tree connector lines based on the node's and its parent's position. + useLayoutEffect(() => { + if (parentRef === undefined) { + // We're the root node. 
+ return; + } + + if (parentRef.current === null || nodeRef.current === null) { + return; + } + const parentRect = parentRef.current.getBoundingClientRect(); + const nodeRect = nodeRef.current.getBoundingClientRect(); + if (reverse) { + setConnectorStyle((prevStyle) => ({ + ...prevStyle, + top: "calc(50% - 1px)", + bottom: nodeRect.bottom - parentRect.top, + borderTopLeftRadius: 3, + borderTopStyle: "solid", + borderBottomLeftRadius: undefined, + })); + } else { + setConnectorStyle((prevStyle) => ({ + ...prevStyle, + top: parentRect.bottom - nodeRect.top, + bottom: "calc(50% - 1px)", + borderBottomLeftRadius: 3, + borderBottomStyle: "solid", + borderTopLeftRadius: undefined, + })); + } + }, [parentRef, reverse, nodeRef]); + + // Update the node info state based on the query result. + useEffect(() => { + if (!data) { + return; + } + + reportNodeState && reportNodeState("success"); + + let resultSeries = 0; + const labelValuesByName: Record> = {}; + const { resultType, result } = data.data; + + if (resultType === "scalar" || resultType === "string") { + resultSeries = 1; + } else if (result && result.length > 0) { + resultSeries = result.length; + result.forEach((s: InstantSample | RangeSamples) => { + Object.entries(s.metric).forEach(([ln, lv]) => { + // TODO: If we ever want to include __name__ here again, we cannot use the + // count_over_time(foo[7d]) optimization since that removes the metric name. + if (ln !== "__name__") { + if (!labelValuesByName.hasOwnProperty(ln)) { + labelValuesByName[ln] = { [lv]: 1 }; + } else { + if (!labelValuesByName[ln].hasOwnProperty(lv)) { + labelValuesByName[ln][lv] = 1; + } else { + labelValuesByName[ln][lv]++; + } + } + } + }); + }); + } + + const labelCardinalities: Record = {}; + const labelExamples: Record = + {}; + Object.entries(labelValuesByName).forEach(([ln, lvs]) => { + labelCardinalities[ln] = Object.keys(lvs).length; + // Sort label values by their number of occurrences within this label name. 
+ labelExamples[ln] = Object.entries(lvs) + .sort(([, aCnt], [, bCnt]) => bCnt - aCnt) + .slice(0, 5) + .map(([lv, cnt]) => ({ value: lv, count: cnt })); + }); + + setResultStats({ + numSeries: resultSeries, + labelCardinalities, + labelExamples, + }); + }, [data]); + + const innerNode = ( + + {parentRef && ( + // Connector line between this node and its parent. + + )} + {/* The node itself. */} + { + if (selectedNode?.id === nodeID) { + setSelectedNode(null); + } else { + setSelectedNode({ id: nodeID, node: node }); + } + }} + > + {formatNode(node, false, 1)} + + {mergedChildState === "waiting" ? ( + + + + ) : mergedChildState === "running" ? ( + + ) : mergedChildState === "error" ? ( + + Blocked on child query error + + ) : isFetching ? ( + + ) : error ? ( + + + + Error executing query: {error.message} + + + ) : ( + + {resultStats.numSeries} result{resultStats.numSeries !== 1 && "s"} –{" "} + {responseTime}ms + {/* {resultStats.numSeries > 0 && ( + <> + {labelNames.length > 0 ? ( + labelNames.map((v, idx) => ( + + {idx !== 0 && ", "} + {v} + + )) + ) : ( + <>no labels + )} + + )} */} + + )} + + ); + + if (node.type === nodeType.binaryExpr) { + return ( +
+ + { + setChildStates((prev) => { + const newStates = [...prev]; + newStates[0] = state; + return newStates; + }); + }} + /> + + {innerNode} + + { + setChildStates((prev) => { + const newStates = [...prev]; + newStates[1] = state; + return newStates; + }); + }} + /> + +
+ ); + } else { + return ( +
+ {innerNode} + {children.map((child, idx) => ( + + { + setChildStates((prev) => { + const newStates = [...prev]; + newStates[idx] = state; + return newStates; + }); + }} + /> + + ))} +
+ ); + } +}; + +export default TreeNode; diff --git a/web/ui/mantine-ui/src/pages/query/TreeView.tsx b/web/ui/mantine-ui/src/pages/query/TreeView.tsx new file mode 100644 index 0000000000..900acaca52 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/TreeView.tsx @@ -0,0 +1,45 @@ +import { FC, useState } from "react"; +import { useSuspenseAPIQuery } from "../../api/api"; +import { useAppSelector } from "../../state/hooks"; +import ASTNode from "../../promql/ast"; +import TreeNode from "./TreeNode"; +import { Box } from "@mantine/core"; + +const TreeView: FC<{ + panelIdx: number; + // TODO: Do we need retriggerIdx for the tree view AST parsing? Maybe for children! + retriggerIdx: number; + selectedNode: { + id: string; + node: ASTNode; + } | null; + setSelectedNode: ( + node: { + id: string; + node: ASTNode; + } | null + ) => void; +}> = ({ panelIdx, selectedNode, setSelectedNode }) => { + const { expr } = useAppSelector((state) => state.queryPage.panels[panelIdx]); + + const { data } = useSuspenseAPIQuery({ + path: "/parse_query", + params: { + query: expr, + }, + enabled: expr !== "", + }); + + return ( + + + + ); +}; + +export default TreeView; diff --git a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts index 5d07bde201..86b85b3c8a 100644 --- a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts +++ b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts @@ -35,6 +35,9 @@ export const decodePanelOptionsFromURLParams = (query: string): Panel[] => { decodeSetting("expr", (value) => { panel.expr = value; }); + decodeSetting("show_tree", (value) => { + panel.showTree = value === "1"; + }); decodeSetting("tab", (value) => { panel.visualizer.activeTab = value === "0" ? "graph" : "table"; }); @@ -121,6 +124,7 @@ export const encodePanelOptionsToURLParams = ( panels.forEach((p, idx) => { addParam(idx, "expr", p.expr); + addParam(idx, "show_tree", p.showTree ? 
"1" : "0"); addParam(idx, "tab", p.visualizer.activeTab === "graph" ? "0" : "1"); if (p.visualizer.endTime !== null) { addParam(idx, "end_input", formatTime(p.visualizer.endTime)); diff --git a/web/ui/mantine-ui/src/promql.css b/web/ui/mantine-ui/src/promql.css index 58dcaf3d3e..5337590bb0 100644 --- a/web/ui/mantine-ui/src/promql.css +++ b/web/ui/mantine-ui/src/promql.css @@ -7,7 +7,7 @@ } .promql-metric-name { - color: light-dark(#000, #fff); + /* color: var(--input-color); */ } .promql-label-name { diff --git a/web/ui/mantine-ui/src/state/queryPageSlice.ts b/web/ui/mantine-ui/src/state/queryPageSlice.ts index 4f58d549d3..253b3ee9c6 100644 --- a/web/ui/mantine-ui/src/state/queryPageSlice.ts +++ b/web/ui/mantine-ui/src/state/queryPageSlice.ts @@ -64,7 +64,7 @@ export type Panel = { // The id is helpful as a stable key for React. id: string; expr: string; - exprStale: boolean; + showTree: boolean; showMetricsExplorer: boolean; visualizer: Visualizer; }; @@ -77,7 +77,7 @@ interface QueryPageState { export const newDefaultPanel = (): Panel => ({ id: randomId(), expr: "", - exprStale: false, + showTree: false, showMetricsExplorer: false, visualizer: { activeTab: "table", @@ -130,6 +130,13 @@ export const queryPageSlice = createSlice({ ...state.queryHistory.filter((q) => q !== query), ].slice(0, 50); }, + setShowTree: ( + state, + { payload }: PayloadAction<{ idx: number; showTree: boolean }> + ) => { + state.panels[payload.idx].showTree = payload.showTree; + updateURL(state.panels); + }, setVisualizer: ( state, { payload }: PayloadAction<{ idx: number; visualizer: Visualizer }> diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 27268dcd2b..26cadf8bcd 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -48,6 +48,7 @@ "@types/lodash": "^4.17.7", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.21.22", + "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", "react": "^18.3.1", From 
88519152e588a65364a32c8f5c9753724d76e210 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:44:54 +0200 Subject: [PATCH 0425/1397] Minor cleanups Signed-off-by: Julius Volz --- .../src/pages/query/MetricsExplorer/MetricsExplorer.tsx | 1 - web/ui/mantine-ui/src/promql.css | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx index 3c56256b57..091b3cb746 100644 --- a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx +++ b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx @@ -38,7 +38,6 @@ const MetricsExplorer: FC = ({ insertText, close, }) => { - console.log("metricNames"); // Fetch the alerting rules data. const { data } = useSuspenseAPIQuery({ path: `/metadata`, diff --git a/web/ui/mantine-ui/src/promql.css b/web/ui/mantine-ui/src/promql.css index 5337590bb0..4f64aeb6c1 100644 --- a/web/ui/mantine-ui/src/promql.css +++ b/web/ui/mantine-ui/src/promql.css @@ -7,7 +7,7 @@ } .promql-metric-name { - /* color: var(--input-color); */ + /* Should already inherit the right color from the theme. */ } .promql-label-name { From 7103b35568bb7046f162c30fcd3e8a8f4f54c532 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 17:45:31 +0200 Subject: [PATCH 0426/1397] Add "Show exemplars" button (doesn't do anything yet) Signed-off-by: Julius Volz --- .../mantine-ui/src/pages/query/QueryPanel.tsx | 158 +++++++++--------- 1 file changed, 82 insertions(+), 76 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx index f5e2bffbfb..628183bd6a 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx @@ -205,82 +205,88 @@ const QueryPanel: FC = ({ idx, metricNames }) => { />
- - dispatch( - setVisualizer({ - idx, - visualizer: { - ...panel.visualizer, - displayMode: value as GraphDisplayMode, - }, - }) - ) - } - value={panel.visualizer.displayMode} - data={[ - { - value: GraphDisplayMode.Lines, - label: ( -
- - Unstacked -
- ), - }, - { - value: GraphDisplayMode.Stacked, - label: ( -
- - Stacked -
- ), - }, - { - value: GraphDisplayMode.Heatmap, - label: ( -
- - Heatmap -
- ), - }, - ]} - /> - {/* */} - {/* - dispatch( - setVisualizer({ - idx, - visualizer: { - ...panel.visualizer, - showExemplars: event.currentTarget.checked, - }, - }) - ) - } - color={"rgba(34,139,230,.1)"} - size="md" - label="Show exemplars" - thumbIcon={ - panel.visualizer.showExemplars ? ( - - ) : ( - - ) - } - /> */} + + + + + dispatch( + setVisualizer({ + idx, + visualizer: { + ...panel.visualizer, + displayMode: value as GraphDisplayMode, + }, + }) + ) + } + value={panel.visualizer.displayMode} + data={[ + { + value: GraphDisplayMode.Lines, + label: ( +
+ + Unstacked +
+ ), + }, + { + value: GraphDisplayMode.Stacked, + label: ( +
+ + Stacked +
+ ), + }, + // { + // value: GraphDisplayMode.Heatmap, + // label: ( + //
+ // + // Heatmap + //
+ // ), + // }, + ]} + /> +
Date: Wed, 4 Sep 2024 17:49:08 +0200 Subject: [PATCH 0427/1397] Fix lint errors by removing unused imports and vars Signed-off-by: Julius Volz --- .../src/pages/AlertmanagerDiscoveryPage.tsx | 39 +------------------ .../src/pages/query/ExpressionInput.tsx | 2 +- .../mantine-ui/src/pages/query/TreeNode.tsx | 8 +--- .../mantine-ui/src/pages/query/TreeView.tsx | 2 +- 4 files changed, 6 insertions(+), 45 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx b/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx index 8fd12511ee..2ea248117b 100644 --- a/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx +++ b/web/ui/mantine-ui/src/pages/AlertmanagerDiscoveryPage.tsx @@ -1,42 +1,7 @@ -import { - ActionIcon, - Alert, - Box, - Card, - Group, - Select, - Skeleton, - Stack, - Table, - Text, -} from "@mantine/core"; -import { - IconBell, - IconBellOff, - IconInfoCircle, - IconLayoutNavbarCollapse, - IconLayoutNavbarExpand, - IconSearch, -} from "@tabler/icons-react"; -import { Suspense } from "react"; -import { useAppDispatch, useAppSelector } from "../state/hooks"; +import { Alert, Card, Group, Stack, Table, Text } from "@mantine/core"; +import { IconBell, IconBellOff, IconInfoCircle } from "@tabler/icons-react"; -import { - ArrayParam, - StringParam, - useQueryParam, - withDefault, -} from "use-query-params"; -import ErrorBoundary from "../components/ErrorBoundary"; -import ScrapePoolList from "./ServiceDiscoveryPoolsList"; import { useSuspenseAPIQuery } from "../api/api"; -import { ScrapePoolsResult } from "../api/responseTypes/scrapePools"; -import { - setCollapsedPools, - setShowLimitAlert, -} from "../state/serviceDiscoveryPageSlice"; -import { StateMultiSelect } from "../components/StateMultiSelect"; -import badgeClasses from "../Badge.module.css"; import { AlertmanagersResult } from "../api/responseTypes/alertmanagers"; import EndpointLink from "../components/EndpointLink"; diff --git 
a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx index 75a578f918..d7dbf0dd69 100644 --- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx @@ -16,7 +16,7 @@ import { PromQLExtension, newCompleteStrategy, } from "@prometheus-io/codemirror-promql"; -import { FC, Suspense, useEffect, useMemo, useRef, useState } from "react"; +import { FC, Suspense, useEffect, useRef, useState } from "react"; import CodeMirror, { EditorState, EditorView, diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index 087f58a917..5e73d040c0 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -17,11 +17,7 @@ import { RangeSamples, } from "../../api/responseTypes/query"; import serializeNode from "../../promql/serialize"; -import { - IconAlertTriangle, - IconPoint, - IconPointFilled, -} from "@tabler/icons-react"; +import { IconPointFilled } from "@tabler/icons-react"; import classes from "./TreeNode.module.css"; import clsx from "clsx"; import { useId } from "@mantine/hooks"; @@ -90,7 +86,7 @@ const TreeNode: FC<{ [childStates] ); - const { data, error, isFetching, refetch } = useAPIQuery({ + const { data, error, isFetching } = useAPIQuery({ key: [useId()], path: "/query", params: { diff --git a/web/ui/mantine-ui/src/pages/query/TreeView.tsx b/web/ui/mantine-ui/src/pages/query/TreeView.tsx index 900acaca52..c621e5df9c 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeView.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeView.tsx @@ -1,4 +1,4 @@ -import { FC, useState } from "react"; +import { FC } from "react"; import { useSuspenseAPIQuery } from "../../api/api"; import { useAppSelector } from "../../state/hooks"; import ASTNode from "../../promql/ast"; From befcfadf78f37b7fde32e42f61303e31b67712e9 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: 
Wed, 4 Sep 2024 18:51:09 +0200 Subject: [PATCH 0428/1397] Fix merge conflicts Fix call to newTestEngine(t) in promql/engine_test.go:3214. `agent` feature-flag it's own cmdline flag now. Remove `scrape.name-escaping-scheme` argument. Signed-off-by: Jan Fajerski --- cmd/prometheus/main.go | 6 +++--- docs/command-line/prometheus.md | 4 ++-- promql/engine_test.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 77c7b5f5dd..12e06c7d6c 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -466,11 +466,11 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme) - - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. 
See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) + a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) + promlogflag.AddFlags(a, &cfg.promlogConfig) a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 35de1ecdb8..d637312a30 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -55,8 +55,8 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --scrape.name-escaping-scheme | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/promql/engine_test.go b/promql/engine_test.go index 4600acee5b..4015d08748 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3211,7 +3211,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { seriesName := "sparse_histogram_series" seriesNameOverTime := "sparse_histogram_series_over_time" - engine := newTestEngine() + engine := newTestEngine(t) ts := idx0 * int64(10*time.Minute/time.Millisecond) app := storage.Appender(context.Background()) From ca1f30bb6e8c025c8f82ae7ad39060aa66975536 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Thu, 29 Aug 2024 14:45:09 -0300 Subject: [PATCH 0429/1397] Promote Agent mode to it's own cmdline flag Signed-off-by: Arthur Silva Sens --- cmd/prometheus/main.go | 3 --- cmd/prometheus/main_test.go | 8 ++++---- docs/command-line/prometheus.md | 4 ++++ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 12e06c7d6c..c0bd136fa8 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -200,9 +200,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "new-service-discovery-manager": c.enableNewSDManager = true level.Info(logger).Log("msg", "Experimental service discovery manager") - case "agent": - agentMode = true - 
level.Info(logger).Log("msg", "Experimental agent mode enabled.") case "promql-per-step-stats": c.enablePerStepStats = true level.Info(logger).Log("msg", "Experimental per-step statistics reporting") diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index c827812e60..bc16f26d73 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -348,7 +348,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -366,7 +366,7 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} prom.Stderr = &output @@ -393,7 +393,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -431,7 +431,7 @@ func TestModeSpecificFlags(t *testing.T) { args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { - args = append(args, "--enable-feature=agent", 
"--config.file="+agentConfig) + args = append(args, "--agent", "--config.file="+agentConfig) } else { args = append(args, "--config.file="+promConfig) } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index d637312a30..4e5c444076 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -55,7 +55,11 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | +<<<<<<< HEAD | --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +======= +| --enable-feature | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. 
| | +>>>>>>> 60b9e9fc1 (Promote Agent mode to it's own cmdline flag) | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | From 1f3de9c59975bc3da136df39037acb460ddb6293 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 20:43:33 +0200 Subject: [PATCH 0430/1397] Change range input from Input to TextInput Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/RangeInput.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/RangeInput.tsx b/web/ui/mantine-ui/src/pages/query/RangeInput.tsx index e09ddf8d2a..c201b14f98 100644 --- a/web/ui/mantine-ui/src/pages/query/RangeInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/RangeInput.tsx @@ -1,5 +1,5 @@ import { FC, useEffect, useState } from "react"; -import { ActionIcon, Group, Input } from "@mantine/core"; +import { ActionIcon, Group, Text, TextInput } from "@mantine/core"; import { IconMinus, IconPlus } from "@tabler/icons-react"; import { formatPrometheusDuration, @@ -78,7 +78,7 @@ const RangeInput: FC = ({ range, onChangeRange }) => { return ( - setRangeInput(event.currentTarget.value)} From 8ee70c5fa0e7715de0dc65696ccaaa2fa9c7360d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 21:03:00 +0200 Subject: [PATCH 0431/1397] Remove unused import Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/RangeInput.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/pages/query/RangeInput.tsx b/web/ui/mantine-ui/src/pages/query/RangeInput.tsx index c201b14f98..ec70cff969 100644 --- a/web/ui/mantine-ui/src/pages/query/RangeInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/RangeInput.tsx @@ -1,5 +1,5 @@ import { FC, useEffect, useState } from "react"; -import { ActionIcon, Group, Text, TextInput } 
from "@mantine/core"; +import { ActionIcon, Group, TextInput } from "@mantine/core"; import { IconMinus, IconPlus } from "@tabler/icons-react"; import { formatPrometheusDuration, From d0ee5427bdfac6c30ca00103f419bc1f12e05242 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 21:21:29 +0200 Subject: [PATCH 0432/1397] Redirect /graph to /query in new UI, preserving params Signed-off-by: Julius Volz --- web/web.go | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/web/web.go b/web/web.go index fb8d3608d6..426e609032 100644 --- a/web/web.go +++ b/web/web.go @@ -63,8 +63,9 @@ import ( "github.com/prometheus/prometheus/web/ui" ) -// Paths that are handled by the React / Reach router that should all be served the main React app's index.html. -var reactRouterPaths = []string{ +// Paths handled by the React router that should all serve the main React app's index.html, +// no matter if agent mode is enabled or not. +var oldUIReactRouterPaths = []string{ "/config", "/flags", "/service-discovery", @@ -72,16 +73,31 @@ var reactRouterPaths = []string{ "/targets", } +var newUIReactRouterPaths = []string{ + "/config", + "/flags", + "/service-discovery", + "/alertmanager-discovery", + "/status", + "/targets", +} + // Paths that are handled by the React router when the Agent mode is set. var reactRouterAgentPaths = []string{ "/agent", } // Paths that are handled by the React router when the Agent mode is not set. -var reactRouterServerPaths = []string{ +var oldUIReactRouterServerPaths = []string{ "/alerts", "/graph", - "/query", + "/rules", + "/tsdb-status", +} + +var newUIReactRouterServerPaths = []string{ + "/alerts", + "/query", // The old /graph redirects to /query on the server side. 
"/rules", "/tsdb-status", } @@ -382,6 +398,12 @@ func New(logger log.Logger, o *Options) *Handler { http.Redirect(w, r, path.Join(o.ExternalURL.Path, homePage), http.StatusFound) }) + if o.UseNewUI { + router.Get("/graph", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/query?"+r.URL.RawQuery), http.StatusFound) + }) + } + reactAssetsRoot := "/static/react-app" if h.options.UseNewUI { reactAssetsRoot = "/static/mantine-ui" @@ -426,6 +448,13 @@ func New(logger log.Logger, o *Options) *Handler { } // Serve the React app. + reactRouterPaths := oldUIReactRouterPaths + reactRouterServerPaths := oldUIReactRouterServerPaths + if h.options.UseNewUI { + reactRouterPaths = newUIReactRouterPaths + reactRouterServerPaths = newUIReactRouterServerPaths + } + for _, p := range reactRouterPaths { router.Get(p, serveReactApp) } From c73c3e24d7f0d3326aeb288d2ef806f0b00de8f1 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 21:33:37 +0200 Subject: [PATCH 0433/1397] Default to serving new (Prometheus 3.0) UI Signed-off-by: Julius Volz --- cmd/prometheus/main.go | 8 +++--- docs/command-line/prometheus.md | 2 +- docs/feature_flags.md | 6 ++--- web/ui/mantine-ui/src/pages/RulesPage.tsx | 11 +++++++- web/web.go | 32 +++++++++++------------ 5 files changed, 34 insertions(+), 25 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 01c8f336bd..654a891f7e 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -250,9 +250,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { continue case "promql-at-modifier", "promql-negative-offset": level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o) - case "new-ui": - c.web.UseNewUI = true - level.Info(logger).Log("msg", "Serving experimental new web UI.") + case "old-ui": + c.web.UseOldUI = true + level.Info(logger).Log("msg", "Serving previous version 
of the Prometheus web UI.") default: level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) } @@ -495,7 +495,7 @@ func main() { a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, new-ui, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, old-ui, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). 
Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 87cf5a4310..8060067ffe 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -57,7 +57,7 @@ The Prometheus monitoring server | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | --scrape.name-escaping-scheme | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, new-ui, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. 
Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, old-ui, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index c9289e445a..e5c0eec8a1 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -226,11 +226,11 @@ This has the potential to improve rule group evaluation latency and resource uti The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default. -## Experimental new web UI +## Serve old Prometheus UI -Enables the new experimental web UI instead of the old and stable web UI. The new UI is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. It is not feature complete yet and is still under active development. +Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. The new UI that was released as part of Prometheus 3.0 is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. However, it is not fully feature complete and battle-tested yet, so some users may still prefer using the old UI. 
-`--enable-feature=new-ui` +`--enable-feature=old-ui` ## Metadata WAL Records diff --git a/web/ui/mantine-ui/src/pages/RulesPage.tsx b/web/ui/mantine-ui/src/pages/RulesPage.tsx index a335228f3a..8247ef810a 100644 --- a/web/ui/mantine-ui/src/pages/RulesPage.tsx +++ b/web/ui/mantine-ui/src/pages/RulesPage.tsx @@ -141,7 +141,16 @@ export default function RulesPage() {
)} - {r.name} + + {r.name} +
diff --git a/web/web.go b/web/web.go index 426e609032..91e5de1c43 100644 --- a/web/web.go +++ b/web/web.go @@ -270,7 +270,7 @@ type Options struct { UserAssetsPath string ConsoleTemplatesPath string ConsoleLibrariesPath string - UseNewUI bool + UseOldUI bool EnableLifecycle bool EnableAdminAPI bool PageTitle string @@ -384,9 +384,9 @@ func New(logger log.Logger, o *Options) *Handler { router = router.WithPrefix(o.RoutePrefix) } - homePage := "/graph" - if o.UseNewUI { - homePage = "/query" + homePage := "/query" + if o.UseOldUI { + homePage = "/graph" } if o.IsAgent { homePage = "/agent" @@ -398,15 +398,15 @@ func New(logger log.Logger, o *Options) *Handler { http.Redirect(w, r, path.Join(o.ExternalURL.Path, homePage), http.StatusFound) }) - if o.UseNewUI { + if !o.UseOldUI { router.Get("/graph", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/query?"+r.URL.RawQuery), http.StatusFound) }) } - reactAssetsRoot := "/static/react-app" - if h.options.UseNewUI { - reactAssetsRoot = "/static/mantine-ui" + reactAssetsRoot := "/static/mantine-ui" + if h.options.UseOldUI { + reactAssetsRoot = "/static/react-app" } // The console library examples at 'console_libraries/prom.lib' still depend on old asset files being served under `classic`. @@ -448,11 +448,11 @@ func New(logger log.Logger, o *Options) *Handler { } // Serve the React app. 
- reactRouterPaths := oldUIReactRouterPaths - reactRouterServerPaths := oldUIReactRouterServerPaths - if h.options.UseNewUI { - reactRouterPaths = newUIReactRouterPaths - reactRouterServerPaths = newUIReactRouterServerPaths + reactRouterPaths := newUIReactRouterPaths + reactRouterServerPaths := newUIReactRouterServerPaths + if h.options.UseOldUI { + reactRouterPaths = oldUIReactRouterPaths + reactRouterServerPaths = oldUIReactRouterServerPaths } for _, p := range reactRouterPaths { @@ -480,9 +480,9 @@ func New(logger log.Logger, o *Options) *Handler { }) } - reactStaticAssetsDir := "/static" - if h.options.UseNewUI { - reactStaticAssetsDir = "/assets" + reactStaticAssetsDir := "/assets" + if h.options.UseOldUI { + reactStaticAssetsDir = "/static" } // Static files required by the React app. router.Get(reactStaticAssetsDir+"/*filepath", func(w http.ResponseWriter, r *http.Request) { From 53affc64dbb08967399eba1705648769a5c8d3f4 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 21:41:27 +0200 Subject: [PATCH 0434/1397] Clear the query page when navigating away from it Signed-off-by: Julius Volz --- .../mantine-ui/src/pages/query/QueryPage.tsx | 32 +++++++++++++------ 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/QueryPage.tsx b/web/ui/mantine-ui/src/pages/query/QueryPage.tsx index c6b3d3b7af..d3131efa22 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPage.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPage.tsx @@ -5,7 +5,11 @@ import { IconPlus, } from "@tabler/icons-react"; import { useAppDispatch, useAppSelector } from "../../state/hooks"; -import { addPanel, setPanels } from "../../state/queryPageSlice"; +import { + addPanel, + newDefaultPanel, + setPanels, +} from "../../state/queryPageSlice"; import Panel from "./QueryPanel"; import { LabelValuesResult } from "../../api/responseTypes/labelValues"; import { useAPIQuery } from "../../api/api"; @@ -19,6 +23,7 @@ export default function 
QueryPage() { const dispatch = useAppDispatch(); const [timeDelta, setTimeDelta] = useState(0); + // Update the panels whenever the URL params change. useEffect(() => { const handleURLChange = () => { const panels = decodePanelOptionsFromURLParams(window.location.search); @@ -36,6 +41,13 @@ export default function QueryPage() { }; }, [dispatch]); + // Clear the query page when navigating away from it. + useEffect(() => { + return () => { + dispatch(setPanels([newDefaultPanel()])); + }; + }, [dispatch]); + const { data: metricNamesResult, error: metricNamesError } = useAPIQuery({ path: "/label/__name__/values", @@ -50,15 +62,17 @@ export default function QueryPage() { }); useEffect(() => { - if (timeResult) { - if (timeResult.data.resultType !== "scalar") { - throw new Error("Unexpected result type from time query"); - } - - const browserTime = new Date().getTime() / 1000; - const serverTime = timeResult.data.result[0]; - setTimeDelta(Math.abs(browserTime - serverTime)); + if (!timeResult) { + return; } + + if (timeResult.data.resultType !== "scalar") { + throw new Error("Unexpected result type from time query"); + } + + const browserTime = new Date().getTime() / 1000; + const serverTime = timeResult.data.result[0]; + setTimeDelta(Math.abs(browserTime - serverTime)); }, [timeResult]); return ( From 1f8a6b4d6e0df7922f203ea36f28870063f7ba62 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 4 Sep 2024 21:50:44 +0200 Subject: [PATCH 0435/1397] Show Consoles link if configured Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 3197cc2c61..57701914fd 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -26,6 +26,7 @@ import { IconChevronRight, IconCloudDataConnection, IconDatabase, + IconDeviceDesktopAnalytics, IconFlag, IconHeartRateMonitor, IconInfoCircle, @@ -197,10 +198,22 @@ 
function App() { dispatch(updateSettings({ pathPrefix })); }, [pathPrefix]); - const { agentMode } = useSettings(); + const { agentMode, consolesLink } = useSettings(); const navLinks = ( <> + {consolesLink && ( + + )} + {mainNavPages .filter((p) => !agentMode || p.inAgentMode) .map((p) => ( From 2d9dad1daa9a6799384bd1a933d4474dfbf30dae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 08:45:23 +0000 Subject: [PATCH 0436/1397] Bump golang.org/x/sys from 0.23.0 to 0.25.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.23.0 to 0.25.0. - [Commits](https://github.com/golang/sys/compare/v0.23.0...v0.25.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1a9cc9555a..0f93b0a17f 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.23.0 + golang.org/x/sys v0.25.0 golang.org/x/text v0.17.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 diff --git a/go.sum b/go.sum index 01676cb4d3..8896276929 100644 --- a/go.sum +++ b/go.sum @@ -947,8 +947,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= From ce0f09b125b888ad0c4061c4506cd47ddce5c20f Mon Sep 17 00:00:00 2001 From: Julien Date: Mon, 26 Aug 2024 11:41:56 +0200 Subject: [PATCH 0437/1397] Scrape: Add scrape_failure_log_file to log Scrape Failures Signed-off-by: Julien --- cmd/prometheus/main.go | 1 + cmd/prometheus/scrape_failure_log_test.go | 193 ++++++++++++++++++++++ config/config.go | 10 ++ config/config_test.go | 41 ++++- config/testdata/conf.good.yml | 3 + docs/configuration/configuration.md | 8 + scrape/manager.go | 73 ++++++-- scrape/manager_test.go | 12 +- scrape/scrape.go | 43 +++++ scrape/scrape_test.go | 5 +- 10 files changed, 363 insertions(+), 26 deletions(-) create mode 100644 cmd/prometheus/scrape_failure_log_test.go diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 65ffd7de5a..a021259f9a 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -755,6 +755,7 @@ func main() { scrapeManager, err := scrape.NewManager( &cfg.scrape, log.With(logger, "component", "scrape manager"), + func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) }, fanoutStorage, prometheus.DefaultRegisterer, ) diff --git a/cmd/prometheus/scrape_failure_log_test.go b/cmd/prometheus/scrape_failure_log_test.go new file mode 100644 index 0000000000..8d86d719f9 --- /dev/null +++ b/cmd/prometheus/scrape_failure_log_test.go @@ -0,0 +1,193 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/prometheus/prometheus/util/testutil" +) + +func TestScrapeFailureLogFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + // Tracks the number of requests made to the mock server. + var requestCount atomic.Int32 + + // Starts a server that always returns HTTP 500 errors. + mockServerAddress := startGarbageServer(t, &requestCount) + + // Create a temporary directory for Prometheus configuration and logs. + tempDir := t.TempDir() + + // Define file paths for the scrape failure log and Prometheus configuration. + // Like other files, the scrape failure log file should be relative to the + // config file. Therefore, we split the name we put in the file and the full + // path used to check the content of the file. + scrapeFailureLogFileName := "scrape_failure.log" + scrapeFailureLogFile := filepath.Join(tempDir, scrapeFailureLogFileName) + promConfigFile := filepath.Join(tempDir, "prometheus.yml") + + // Step 1: Set up an initial Prometheus configuration that globally + // specifies a scrape failure log file. 
+ promConfig := fmt.Sprintf(` +global: + scrape_interval: 500ms + scrape_failure_log_file: %s + +scrape_configs: + - job_name: 'test_job' + static_configs: + - targets: ['%s'] +`, scrapeFailureLogFileName, mockServerAddress) + + err := os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to write Prometheus configuration file") + + // Start Prometheus with the generated configuration and a random port, enabling the lifecycle API. + port := testutil.RandomUnprivilegedPort(t) + params := []string{ + "-test.main", + "--config.file=" + promConfigFile, + "--storage.tsdb.path=" + filepath.Join(tempDir, "data"), + fmt.Sprintf("--web.listen-address=127.0.0.1:%d", port), + "--web.enable-lifecycle", + } + prometheusProcess := exec.Command(promPath, params...) + prometheusProcess.Stdout = os.Stdout + prometheusProcess.Stderr = os.Stderr + + err = prometheusProcess.Start() + require.NoError(t, err, "Failed to start Prometheus") + defer prometheusProcess.Process.Kill() + + // Wait until the mock server receives at least two requests from Prometheus. + require.Eventually(t, func() bool { + return requestCount.Load() >= 2 + }, 30*time.Second, 500*time.Millisecond, "Expected at least two requests to the mock server") + + // Verify that the scrape failures have been logged to the specified file. + content, err := os.ReadFile(scrapeFailureLogFile) + require.NoError(t, err, "Failed to read scrape failure log") + require.Contains(t, string(content), "server returned HTTP status 500 Internal Server Error", "Expected scrape failure log entry not found") + + // Step 2: Update the Prometheus configuration to remove the scrape failure + // log file setting. 
+ promConfig = fmt.Sprintf(` +global: + scrape_interval: 1s + +scrape_configs: + - job_name: 'test_job' + static_configs: + - targets: ['%s'] +`, mockServerAddress) + + err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to update Prometheus configuration file") + + // Reload Prometheus with the updated configuration. + reloadPrometheus(t, port) + + // Count the number of lines in the scrape failure log file before any + // further requests. + preReloadLogLineCount := countLinesInFile(scrapeFailureLogFile) + + // Wait for at least two more requests to the mock server to ensure + // Prometheus continues scraping. + requestsBeforeReload := requestCount.Load() + require.Eventually(t, func() bool { + return requestCount.Load() >= requestsBeforeReload+2 + }, 30*time.Second, 500*time.Millisecond, "Expected two more requests to the mock server after configuration reload") + + // Ensure that no new lines were added to the scrape failure log file after + // the configuration change. + require.Equal(t, preReloadLogLineCount, countLinesInFile(scrapeFailureLogFile), "No new lines should be added to the scrape failure log file after removing the log setting") + + // Step 3: Re-add the scrape failure log file setting, but this time under + // scrape_configs, and reload Prometheus. + promConfig = fmt.Sprintf(` +global: + scrape_interval: 1s + +scrape_configs: + - job_name: 'test_job' + scrape_failure_log_file: %s + static_configs: + - targets: ['%s'] +`, scrapeFailureLogFileName, mockServerAddress) + + err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644) + require.NoError(t, err, "Failed to update Prometheus configuration file") + + // Reload Prometheus with the updated configuration. + reloadPrometheus(t, port) + + // Wait for at least two more requests to the mock server and verify that + // new log entries are created. 
+ postReloadLogLineCount := countLinesInFile(scrapeFailureLogFile) + requestsBeforeReAddingLog := requestCount.Load() + require.Eventually(t, func() bool { + return requestCount.Load() >= requestsBeforeReAddingLog+2 + }, 30*time.Second, 500*time.Millisecond, "Expected two additional requests after re-adding the log setting") + + // Confirm that new lines were added to the scrape failure log file. + require.Greater(t, countLinesInFile(scrapeFailureLogFile), postReloadLogLineCount, "New lines should be added to the scrape failure log file after re-adding the log setting") +} + +// reloadPrometheus sends a reload request to the Prometheus server to apply +// updated configurations. +func reloadPrometheus(t *testing.T, port int) { + resp, err := http.Post(fmt.Sprintf("http://127.0.0.1:%d/-/reload", port), "", nil) + require.NoError(t, err, "Failed to reload Prometheus") + require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status code when reloading Prometheus") +} + +// startGarbageServer sets up a mock server that returns a 500 Internal Server Error +// for all requests. It also increments the request count each time it's hit. +func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestCount.Inc() + w.WriteHeader(http.StatusInternalServerError) + })) + t.Cleanup(server.Close) + + parsedURL, err := url.Parse(server.URL) + require.NoError(t, err, "Failed to parse mock server URL") + + return parsedURL.Host +} + +// countLinesInFile counts and returns the number of lines in the specified file. +func countLinesInFile(filePath string) int { + data, err := os.ReadFile(filePath) + if err != nil { + return 0 // Return 0 if the file doesn't exist or can't be read. 
+ } + return bytes.Count(data, []byte{'\n'}) +} diff --git a/config/config.go b/config/config.go index c9e8efbf3e..4f80b551bc 100644 --- a/config/config.go +++ b/config/config.go @@ -429,6 +429,8 @@ type GlobalConfig struct { RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"` // File to which PromQL queries are logged. QueryLogFile string `yaml:"query_log_file,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` // An uncompressed response body larger than this many bytes will cause the @@ -529,6 +531,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error { // SetDirectory joins any relative file paths with dir. func (c *GlobalConfig) SetDirectory(dir string) { c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -591,6 +594,7 @@ func (c *GlobalConfig) isZero() bool { c.EvaluationInterval == 0 && c.RuleQueryOffset == 0 && c.QueryLogFile == "" && + c.ScrapeFailureLogFile == "" && c.ScrapeProtocols == nil } @@ -632,6 +636,8 @@ type ScrapeConfig struct { ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` // Whether to scrape a classic histogram that is also exposed as a native histogram. ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. 
@@ -684,6 +690,7 @@ type ScrapeConfig struct { func (c *ScrapeConfig) SetDirectory(dir string) { c.ServiceDiscoveryConfigs.SetDirectory(dir) c.HTTPClientConfig.SetDirectory(dir) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -765,6 +772,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c.KeepDroppedTargets == 0 { c.KeepDroppedTargets = globalConfig.KeepDroppedTargets } + if c.ScrapeFailureLogFile == "" { + c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile + } if c.ScrapeProtocols == nil { c.ScrapeProtocols = globalConfig.ScrapeProtocols diff --git a/config/config_test.go b/config/config_test.go index 2219061823..726b233cce 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -78,14 +78,16 @@ const ( globLabelNameLengthLimit = 200 globLabelValueLengthLimit = 200 globalGoGC = 42 + globScrapeFailureLogFile = "testdata/fail.log" ) var expectedConf = &Config{ GlobalConfig: GlobalConfig{ - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, - EvaluationInterval: model.Duration(30 * time.Second), - QueryLogFile: "", + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EvaluationInterval: model.Duration(30 * time.Second), + QueryLogFile: "testdata/query.log", + ScrapeFailureLogFile: globScrapeFailureLogFile, ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"), @@ -211,6 +213,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: "testdata/fail_prom.log", MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -314,6 +317,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: 210, LabelValueLengthLimit: 210, ScrapeProtocols: 
[]ScrapeProtocol{PrometheusText0_0_4}, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -411,6 +415,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -466,6 +471,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: "/metrics", Scheme: "http", @@ -499,6 +505,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -538,6 +545,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -577,6 +585,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -606,6 +615,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + 
ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -643,6 +653,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -677,6 +688,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -718,6 +730,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -749,6 +762,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -783,6 +797,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -810,6 +825,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + 
ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -840,6 +856,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -870,6 +887,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -900,6 +918,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -927,6 +946,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -962,6 +982,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -996,6 +1017,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: 
globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1027,6 +1049,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1057,6 +1080,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1091,6 +1115,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1128,6 +1153,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1184,6 +1210,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1211,6 +1238,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + 
ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1249,6 +1277,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1293,6 +1322,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1328,6 +1358,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1357,6 +1388,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1389,6 +1421,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + ScrapeFailureLogFile: globScrapeFailureLogFile, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 56741822c2..8da6e5c566 100644 --- a/config/testdata/conf.good.yml +++ 
b/config/testdata/conf.good.yml @@ -8,6 +8,8 @@ global: label_limit: 30 label_name_length_limit: 200 label_value_length_limit: 200 + query_log_file: query.log + scrape_failure_log_file: fail.log # scrape_timeout is set to the global default (10s). external_labels: @@ -72,6 +74,7 @@ scrape_configs: # metrics_path defaults to '/metrics' # scheme defaults to 'http'. + scrape_failure_log_file: fail_prom.log file_sd_configs: - files: - foo/*.slow.json diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index a42126cf22..a8c8d6e264 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -84,6 +84,10 @@ global: # Reloading the configuration will reopen the file. [ query_log_file: ] + # File to which scrape failures are logged. + # Reloading the configuration will reopen the file. + [ scrape_failure_log_file: ] + # An uncompressed response body larger than this many bytes will cause the # scrape to fail. 0 means no limit. Example: 100MB. # This is an experimental feature, this behaviour could @@ -319,6 +323,10 @@ http_headers: # Files to read header values from. [ files: [, ...] ] ] +# File to which scrape failures are logged. +# Reloading the configuration will reopen the file. +[ scrape_failure_log_file: ] + # List of Azure service discovery configurations. azure_sd_configs: [ - ... ] diff --git a/scrape/manager.go b/scrape/manager.go index e3dba5f0ee..d7786a082b 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "hash/fnv" + "io" "reflect" "sync" "time" @@ -36,7 +37,7 @@ import ( ) // NewManager is the Manager constructor. 
-func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { +func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(string) (log.Logger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } @@ -50,15 +51,16 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable, registere } m := &Manager{ - append: app, - opts: o, - logger: logger, - scrapeConfigs: make(map[string]*config.ScrapeConfig), - scrapePools: make(map[string]*scrapePool), - graceShut: make(chan struct{}), - triggerReload: make(chan struct{}, 1), - metrics: sm, - buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), + append: app, + opts: o, + logger: logger, + newScrapeFailureLogger: newScrapeFailureLogger, + scrapeConfigs: make(map[string]*config.ScrapeConfig), + scrapePools: make(map[string]*scrapePool), + graceShut: make(chan struct{}), + triggerReload: make(chan struct{}, 1), + metrics: sm, + buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), } m.metrics.setTargetMetadataCacheGatherer(m) @@ -103,12 +105,14 @@ type Manager struct { append storage.Appendable graceShut chan struct{} - offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. - mtxScrape sync.Mutex // Guards the fields below. - scrapeConfigs map[string]*config.ScrapeConfig - scrapePools map[string]*scrapePool - targetSets map[string][]*targetgroup.Group - buffers *pool.Pool + offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. + mtxScrape sync.Mutex // Guards the fields below. 
+ scrapeConfigs map[string]*config.ScrapeConfig + scrapePools map[string]*scrapePool + newScrapeFailureLogger func(string) (log.Logger, error) + scrapeFailureLoggers map[string]log.Logger + targetSets map[string][]*targetgroup.Group + buffers *pool.Pool triggerReload chan struct{} @@ -183,6 +187,11 @@ func (m *Manager) reload() { continue } m.scrapePools[setName] = sp + if l, ok := m.scrapeFailureLoggers[scrapeConfig.ScrapeFailureLogFile]; ok { + sp.SetScrapeFailureLogger(l) + } else { + level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) + } } wg.Add(1) @@ -238,11 +247,36 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) + scrapeFailureLoggers := map[string]log.Logger{ + "": nil, // Emptying the file name sets the scrape logger to nil. + } for _, scfg := range scfgs { c[scfg.JobName] = scfg + if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { + // We promise to reopen the file on each reload. 
+ var ( + l log.Logger + err error + ) + if m.newScrapeFailureLogger != nil { + if l, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { + return err + } + } + scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = l + } } m.scrapeConfigs = c + oldScrapeFailureLoggers := m.scrapeFailureLoggers + for _, s := range oldScrapeFailureLoggers { + if closer, ok := s.(io.Closer); ok { + defer closer.Close() + } + } + + m.scrapeFailureLoggers = scrapeFailureLoggers + if err := m.setOffsetSeed(cfg.GlobalConfig.ExternalLabels); err != nil { return err } @@ -260,6 +294,13 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) failed = true } + fallthrough + case ok: + if l, ok := m.scrapeFailureLoggers[cfg.ScrapeFailureLogFile]; ok { + sp.SetScrapeFailureLogger(l) + } else { + level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) + } } } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index c71691c95d..ba32f36cf4 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -511,7 +511,7 @@ scrape_configs: ) opts := Options{} - scrapeManager, err := NewManager(&opts, nil, nil, testRegistry) + scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry) require.NoError(t, err) newLoop := func(scrapeLoopOptions) loop { ch <- struct{}{} @@ -576,7 +576,7 @@ scrape_configs: func TestManagerTargetsUpdates(t *testing.T) { opts := Options{} testRegistry := prometheus.NewRegistry() - m, err := NewManager(&opts, nil, nil, testRegistry) + m, err := NewManager(&opts, nil, nil, nil, testRegistry) require.NoError(t, err) ts := make(chan map[string][]*targetgroup.Group) @@ -629,7 +629,7 @@ global: opts := Options{} testRegistry := prometheus.NewRegistry() - scrapeManager, err := NewManager(&opts, nil, nil, testRegistry) + scrapeManager, err := NewManager(&opts, nil, 
nil, nil, testRegistry) require.NoError(t, err) // Load the first config. @@ -706,7 +706,7 @@ scrape_configs: } opts := Options{} - scrapeManager, err := NewManager(&opts, nil, nil, testRegistry) + scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry) require.NoError(t, err) reload(scrapeManager, cfg1) @@ -758,6 +758,7 @@ func TestManagerCTZeroIngestion(t *testing.T) { skipOffsetting: true, }, log.NewLogfmtLogger(os.Stderr), + nil, &collectResultAppendable{app}, prometheus.NewRegistry(), ) @@ -857,7 +858,7 @@ func TestUnregisterMetrics(t *testing.T) { // Check that all metrics can be unregistered, allowing a second manager to be created. for i := 0; i < 2; i++ { opts := Options{} - manager, err := NewManager(&opts, nil, nil, reg) + manager, err := NewManager(&opts, nil, nil, nil, reg) require.NotNil(t, manager) require.NoError(t, err) // Unregister all metrics. @@ -901,6 +902,7 @@ func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manage scrapeManager, err := NewManager( &Options{DiscoveryReloadInterval: model.Duration(100 * time.Millisecond)}, nil, + nil, nopAppendable{}, prometheus.NewRegistry(), ) diff --git a/scrape/scrape.go b/scrape/scrape.go index 2abd4691d6..ea98432be6 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -90,6 +90,9 @@ type scrapePool struct { noDefaultPort bool metrics *scrapeMetrics + + scrapeFailureLogger log.Logger + scrapeFailureLoggerMtx sync.RWMutex } type labelLimits struct { @@ -218,6 +221,27 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } +func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { + sp.scrapeFailureLoggerMtx.Lock() + defer sp.scrapeFailureLoggerMtx.Unlock() + if l != nil { + l = log.With(l, "job_name", sp.config.JobName) + } + sp.scrapeFailureLogger = l + + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() + for _, s := range sp.loops { + s.setScrapeFailureLogger(sp.scrapeFailureLogger) + } +} + +func (sp *scrapePool) 
getScrapeFailureLogger() log.Logger { + sp.scrapeFailureLoggerMtx.RLock() + defer sp.scrapeFailureLoggerMtx.RUnlock() + return sp.scrapeFailureLogger +} + // stop terminates all scrape loops and returns after they all terminated. func (sp *scrapePool) stop() { sp.mtx.Lock() @@ -361,6 +385,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { wg.Done() newLoop.setForcedError(forcedErr) + newLoop.setScrapeFailureLogger(sp.getScrapeFailureLogger()) newLoop.run(nil) }(oldLoop, newLoop) @@ -503,6 +528,7 @@ func (sp *scrapePool) sync(targets []*Target) { if err != nil { l.setForcedError(err) } + l.setScrapeFailureLogger(sp.scrapeFailureLogger) sp.activeTargets[hash] = t sp.loops[hash] = l @@ -825,6 +851,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) + setScrapeFailureLogger(log.Logger) stop() getCache() *scrapeCache disableEndOfRunStalenessMarkers() @@ -840,6 +867,8 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper l log.Logger + scrapeFailureLogger log.Logger + scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int buffers *pool.Pool @@ -1223,6 +1252,15 @@ func newScrapeLoop(ctx context.Context, return sl } +func (sl *scrapeLoop) setScrapeFailureLogger(l log.Logger) { + sl.scrapeFailureLoggerMtx.Lock() + defer sl.scrapeFailureLoggerMtx.Unlock() + if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { + l = log.With(l, "target", ts.String()) + } + sl.scrapeFailureLogger = l +} + func (sl *scrapeLoop) run(errc chan<- error) { if !sl.skipOffsetting { select { @@ -1366,6 +1404,11 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er bytesRead = len(b) } else { level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + sl.scrapeFailureLoggerMtx.RLock() + if sl.scrapeFailureLogger != nil { + sl.scrapeFailureLogger.Log("err", scrapeErr) + } + sl.scrapeFailureLoggerMtx.RUnlock() if errc != 
nil { errc <- scrapeErr } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index b703f21d46..a69a19d7f7 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -158,6 +158,9 @@ type testLoop struct { timeout time.Duration } +func (l *testLoop) setScrapeFailureLogger(log.Logger) { +} + func (l *testLoop) run(errc chan<- error) { if l.runOnce { panic("loop must be started only once") @@ -3782,7 +3785,7 @@ scrape_configs: s.DB.EnableNativeHistograms() reg := prometheus.NewRegistry() - mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, s, reg) + mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) require.NoError(t, err) cfg, err := config.Load(configStr, false, log.NewNopLogger()) require.NoError(t, err) From 786025fada37c07c45a96acb89281a3ea99fba6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:07:06 +0000 Subject: [PATCH 0438/1397] Bump github.com/prometheus/client_golang Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.19.1 to 1.20.2. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.19.1...v1.20.2) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 22a292db0f..f7bebc8cc1 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 - github.com/prometheus/client_golang v1.20.0 + github.com/prometheus/client_golang v1.20.2 github.com/prometheus/common v0.57.0 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 8f71c3c4a4..1abeff7eb1 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From 3d44a33e540b151563c9a563029f83f72fec50f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:10:02 +0000 Subject: [PATCH 0439/1397] Bump google.golang.org/api from 0.190.0 to 0.196.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.190.0 to 0.196.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.190.0...v0.196.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 22 +++++++++++----------- go.sum | 43 ++++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/go.mod b/go.mod index 6a6056dc5f..deaccf6a67 100644 --- a/go.mod +++ b/go.mod @@ -64,13 +64,13 @@ require ( github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/collector/pdata v1.12.0 go.opentelemetry.io/collector/semconv v0.105.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 + go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 @@ -79,10 +79,10 @@ require ( 
golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 golang.org/x/text v0.17.0 - golang.org/x/time v0.5.0 + golang.org/x/time v0.6.0 golang.org/x/tools v0.23.0 - google.golang.org/api v0.190.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f + google.golang.org/api v0.196.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 @@ -95,8 +95,8 @@ require ( ) require ( - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect @@ -140,7 +140,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.8 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect @@ -185,14 +185,14 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/term v0.23.0 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240730163845-b1a4ccb954bf // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index bb0ae67043..d80e4e14e3 100644 --- a/go.sum +++ b/go.sum @@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -328,8 +328,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= @@ -730,22 +730,22 @@ go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimK go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -976,8 +976,9 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb 
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1049,8 +1050,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= -google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= +google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= +google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1087,10 +1088,10 @@ google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= From bd9129117ef642e498b6b0bf8f1c5b516f66c93c Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Fri, 15 Jul 2022 15:20:44 +0200 Subject: [PATCH 0440/1397] Target parameter labels should not be overridden by config params The following configuration snippet calls http://127.0.0.1:8000/?foo=value1 
instead of http://127.0.0.1:8000/?foo=value2. This is incorrect, the labels of the target should be prefered. ```yaml - job_name: local params: foo: [value1] static_configs: - targets: ['127.0.0.1:8000'] labels: __param_foo: value2 ``` Signed-off-by: Julien Pivotto Signed-off-by: Julien --- scrape/scrape_test.go | 158 ++++++++++++++++++++++++++++++++++++++++++ scrape/target.go | 4 +- 2 files changed, 160 insertions(+), 2 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index b703f21d46..15ec71497a 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3838,3 +3838,161 @@ scrape_configs: require.Equal(t, expectedSchema, h.Schema) } } + +func TestTargetScrapeConfigWithLabels(t *testing.T) { + const ( + configTimeout = 1500 * time.Millisecond + expectedTimeout = "1.5" + expectedTimeoutLabel = "1s500ms" + secondTimeout = 500 * time.Millisecond + secondTimeoutLabel = "500ms" + expectedParam = "value1" + secondParam = "value2" + expectedPath = "/metric-ok" + secondPath = "/metric-nok" + httpScheme = "http" + paramLabel = "__param_param" + jobName = "test" + ) + + createTestServer := func(t *testing.T, done chan struct{}) *url.URL { + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(done) + require.Equal(t, expectedTimeout, r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")) + require.Equal(t, expectedParam, r.URL.Query().Get("param")) + require.Equal(t, expectedPath, r.URL.Path) + + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + w.Write([]byte("metric_a 1\nmetric_b 2\n")) + }), + ) + t.Cleanup(server.Close) + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + return serverURL + } + + run := func(t *testing.T, cfg *config.ScrapeConfig, targets []*targetgroup.Group) chan struct{} { + done := make(chan struct{}) + srvURL := createTestServer(t, done) + + // Update target addresses to use the dynamically created server URL. 
+ for _, target := range targets { + for i := range target.Targets { + target.Targets[i][model.AddressLabel] = model.LabelValue(srvURL.Host) + } + } + + sp, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + require.NoError(t, err) + t.Cleanup(sp.stop) + + sp.Sync(targets) + return done + } + + cases := []struct { + name string + cfg *config.ScrapeConfig + targets []*targetgroup.Group + }{ + { + name: "Everything in scrape config", + cfg: &config.ScrapeConfig{ + ScrapeInterval: model.Duration(2 * time.Second), + ScrapeTimeout: model.Duration(configTimeout), + Params: url.Values{"param": []string{expectedParam}}, + JobName: jobName, + Scheme: httpScheme, + MetricsPath: expectedPath, + }, + targets: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: model.LabelValue("")}, + }, + }, + }, + }, + { + name: "Overridden in target", + cfg: &config.ScrapeConfig{ + ScrapeInterval: model.Duration(2 * time.Second), + ScrapeTimeout: model.Duration(secondTimeout), + JobName: jobName, + Scheme: httpScheme, + MetricsPath: secondPath, + Params: url.Values{"param": []string{secondParam}}, + }, + targets: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(""), + model.ScrapeTimeoutLabel: expectedTimeoutLabel, + model.MetricsPathLabel: expectedPath, + paramLabel: expectedParam, + }, + }, + }, + }, + }, + { + name: "Overridden in relabel_config", + cfg: &config.ScrapeConfig{ + ScrapeInterval: model.Duration(2 * time.Second), + ScrapeTimeout: model.Duration(secondTimeout), + JobName: jobName, + Scheme: httpScheme, + MetricsPath: secondPath, + Params: url.Values{"param": []string{secondParam}}, + RelabelConfigs: []*relabel.Config{ + { + Action: relabel.DefaultRelabelConfig.Action, + Regex: relabel.DefaultRelabelConfig.Regex, + SourceLabels: relabel.DefaultRelabelConfig.SourceLabels, + TargetLabel: model.ScrapeTimeoutLabel, + Replacement: expectedTimeoutLabel, + }, + { + 
Action: relabel.DefaultRelabelConfig.Action, + Regex: relabel.DefaultRelabelConfig.Regex, + SourceLabels: relabel.DefaultRelabelConfig.SourceLabels, + TargetLabel: paramLabel, + Replacement: expectedParam, + }, + { + Action: relabel.DefaultRelabelConfig.Action, + Regex: relabel.DefaultRelabelConfig.Regex, + SourceLabels: relabel.DefaultRelabelConfig.SourceLabels, + TargetLabel: model.MetricsPathLabel, + Replacement: expectedPath, + }, + }, + }, + targets: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(""), + model.ScrapeTimeoutLabel: secondTimeoutLabel, + model.MetricsPathLabel: secondPath, + paramLabel: secondParam, + }, + }, + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + select { + case <-run(t, c.cfg, c.targets): + case <-time.After(10 * time.Second): + t.Fatal("timeout after 10 seconds") + } + }) + } +} diff --git a/scrape/target.go b/scrape/target.go index 9ef4471fbd..3754398338 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -441,8 +441,8 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // Encode scrape query parameters as labels. 
for k, v := range cfg.Params { - if len(v) > 0 { - lb.Set(model.ParamLabelPrefix+k, v[0]) + if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" { + lb.Set(name, v[0]) } } From d18fa62ae9041c8a6bd327eb2068dcc6d4de3d3f Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 30 Aug 2024 13:46:16 +0200 Subject: [PATCH 0441/1397] chore(discovery): enable new-service-discovery-manager by default and drop legacymanager package Signed-off-by: machine424 --- cmd/prometheus/main.go | 65 +- discovery/legacymanager/manager.go | 332 ------- discovery/legacymanager/manager_test.go | 1185 ----------------------- discovery/legacymanager/registry.go | 261 ----- docs/command-line/prometheus.md | 2 +- docs/feature_flags.md | 14 - 6 files changed, 13 insertions(+), 1846 deletions(-) delete mode 100644 discovery/legacymanager/manager.go delete mode 100644 discovery/legacymanager/manager_test.go delete mode 100644 discovery/legacymanager/registry.go diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index c0bd136fa8..fa953874ae 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -58,8 +58,6 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/legacymanager" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -159,7 +157,6 @@ type flagConfig struct { // These options are extracted from featureList // for ease of use. 
enableExpandExternalLabels bool - enableNewSDManager bool enablePerStepStats bool enableAutoGOMAXPROCS bool enableAutoGOMEMLIMIT bool @@ -197,9 +194,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "metadata-wal-records": c.scrape.AppendMetadata = true level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0") - case "new-service-discovery-manager": - c.enableNewSDManager = true - level.Info(logger).Log("msg", "Experimental service discovery manager") case "promql-per-step-stats": c.enablePerStepStats = true level.Info(logger).Log("msg", "Experimental per-step statistics reporting") @@ -463,7 +457,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). 
Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) @@ -651,8 +645,8 @@ func main() { ctxScrape, cancelScrape = context.WithCancel(context.Background()) ctxNotify, cancelNotify = context.WithCancel(context.Background()) - discoveryManagerScrape discoveryManager - discoveryManagerNotify discoveryManager + discoveryManagerScrape *discovery.Manager + discoveryManagerNotify *discovery.Manager ) // Kubernetes client metrics are used by Kubernetes SD. @@ -672,42 +666,16 @@ func main() { os.Exit(1) } - if cfg.enableNewSDManager { - { - discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } + discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) + if discoveryManagerScrape == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager scrape") + os.Exit(1) + } - { - discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } - } else { - { - discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager scrape") - os.Exit(1) - } - discoveryManagerScrape = discMgr - } - - { - discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, 
"component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify")) - if discMgr == nil { - level.Error(logger).Log("msg", "failed to create a discovery manager notify") - os.Exit(1) - } - discoveryManagerNotify = discMgr - } + discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) + if discoveryManagerNotify == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager notify") + os.Exit(1) } scrapeManager, err := scrape.NewManager( @@ -1765,15 +1733,6 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option } } -// discoveryManager interfaces the discovery manager. This is used to keep using -// the manager that restarts SD's on reload for a few releases until we feel -// the new manager can be enabled for all users. -type discoveryManager interface { - ApplyConfig(cfg map[string]discovery.Configs) error - Run() error - SyncCh() <-chan map[string][]*targetgroup.Group -} - // rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum. type rwProtoMsgFlagParser struct { msgs *[]config.RemoteWriteProtoMsg diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go deleted file mode 100644 index 6fc61485d1..0000000000 --- a/discovery/legacymanager/manager.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "reflect" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -type poolKey struct { - setName string - provider string -} - -// provider holds a Discoverer instance, its configuration and its subscribers. -type provider struct { - name string - d discovery.Discoverer - subs []string - config interface{} -} - -// NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager { - if logger == nil { - logger = log.NewNopLogger() - } - mgr := &Manager{ - logger: logger, - syncCh: make(chan map[string][]*targetgroup.Group), - targets: make(map[poolKey]map[string]*targetgroup.Group), - discoverCancel: []context.CancelFunc{}, - ctx: ctx, - updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), - registerer: registerer, - sdMetrics: sdMetrics, - } - for _, option := range options { - option(mgr) - } - - // Register the metrics. - // We have to do this after setting all options, so that the name of the Manager is set. - if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) - return nil - } - - return mgr -} - -// Name sets the name of the manager. -func Name(n string) func(*Manager) { - return func(m *Manager) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.name = n - } -} - -// Manager maintains a set of discovery providers and sends each update to a map channel. 
-// Targets are grouped by the target set name. -type Manager struct { - logger log.Logger - name string - mtx sync.RWMutex - ctx context.Context - discoverCancel []context.CancelFunc - - // Some Discoverers(eg. k8s) send only the updates for a given target group - // so we use map[tg.Source]*targetgroup.Group to know which group to update. - targets map[poolKey]map[string]*targetgroup.Group - // providers keeps track of SD providers. - providers []*provider - // The sync channel sends the updates as a map where the key is the job value from the scrape config. - syncCh chan map[string][]*targetgroup.Group - - // How long to wait before sending updates to the channel. The variable - // should only be modified in unit tests. - updatert time.Duration - - // The triggerSend channel signals to the manager that new updates have been received from providers. - triggerSend chan struct{} - - // A registerer for all service discovery metrics. - registerer prometheus.Registerer - - metrics *discovery.Metrics - sdMetrics map[string]discovery.DiscovererMetrics -} - -// Run starts the background processing. -func (m *Manager) Run() error { - go m.sender() - <-m.ctx.Done() - m.cancelDiscoverers() - return m.ctx.Err() -} - -// SyncCh returns a read only channel used by all the clients to receive target updates. -func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group { - return m.syncCh -} - -// ApplyConfig removes all running discovery providers and starts new ones using the provided config. 
-func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { - m.mtx.Lock() - defer m.mtx.Unlock() - - for pk := range m.targets { - if _, ok := cfg[pk.setName]; !ok { - m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName) - } - } - m.cancelDiscoverers() - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil - - failedCount := 0 - for name, scfg := range cfg { - failedCount += m.registerProviders(scfg, name) - m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0) - } - m.metrics.FailedConfigs.Set(float64(failedCount)) - - for _, prov := range m.providers { - m.startProvider(m.ctx, prov) - } - - return nil -} - -// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. -func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) { - p := &provider{ - name: name, - d: worker, - subs: []string{name}, - } - m.providers = append(m.providers, p) - m.startProvider(ctx, p) -} - -func (m *Manager) startProvider(ctx context.Context, p *provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) - ctx, cancel := context.WithCancel(ctx) - updates := make(chan []*targetgroup.Group) - - m.discoverCancel = append(m.discoverCancel, cancel) - - go p.d.Run(ctx, updates) - go m.updater(ctx, p, updates) -} - -func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { - for { - select { - case <-ctx.Done(): - return - case tgs, ok := <-updates: - m.metrics.ReceivedUpdates.Inc() - if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) - return - } - - for _, s := range p.subs { - m.updateGroup(poolKey{setName: s, provider: p.name}, tgs) - } - - select { - case m.triggerSend <- struct{}{}: - default: - } - } - } -} - -func (m *Manager) sender() { - ticker := time.NewTicker(m.updatert) - defer 
ticker.Stop() - - for { - select { - case <-m.ctx.Done(): - return - case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. - select { - case <-m.triggerSend: - m.metrics.SentUpdates.Inc() - select { - case m.syncCh <- m.allGroups(): - default: - m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") - select { - case m.triggerSend <- struct{}{}: - default: - } - } - default: - } - } - } -} - -func (m *Manager) cancelDiscoverers() { - for _, c := range m.discoverCancel { - c() - } -} - -func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if _, ok := m.targets[poolKey]; !ok { - m.targets[poolKey] = make(map[string]*targetgroup.Group) - } - for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. - m.targets[poolKey][tg.Source] = tg - } - } -} - -func (m *Manager) allGroups() map[string][]*targetgroup.Group { - m.mtx.RLock() - defer m.mtx.RUnlock() - - tSets := map[string][]*targetgroup.Group{} - n := map[string]int{} - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) - } - } - for setName, v := range n { - m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) - } - return tSets -} - -// registerProviders returns a number of failed SD config. 
-func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int { - var ( - failed int - added bool - ) - add := func(cfg discovery.Config) { - for _, p := range m.providers { - if reflect.DeepEqual(cfg, p.config) { - p.subs = append(p.subs, setName) - added = true - return - } - } - typ := cfg.Name() - d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), - Metrics: m.sdMetrics[typ], - }) - if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) - failed++ - return - } - m.providers = append(m.providers, &provider{ - name: fmt.Sprintf("%s/%d", typ, len(m.providers)), - d: d, - config: cfg, - subs: []string{setName}, - }) - added = true - } - for _, cfg := range cfgs { - add(cfg) - } - if !added { - // Add an empty target group to force the refresh of the corresponding - // scrape pool and to notify the receiver that this target set has no - // current targets. - // It can happen because the combined set of SD configurations is empty - // or because we fail to instantiate all the SD configurations. - add(discovery.StaticConfig{{}}) - } - return failed -} - -// StaticProvider holds a list of target groups that never change. -type StaticProvider struct { - TargetGroups []*targetgroup.Group -} - -// Run implements the Worker interface. -func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { - // We still have to consider that the consumer exits right away in which case - // the context will be canceled. 
- select { - case ch <- sd.TargetGroups: - case <-ctx.Done(): - } - close(ch) -} diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go deleted file mode 100644 index f1be963113..0000000000 --- a/discovery/legacymanager/manager_test.go +++ /dev/null @@ -1,1185 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "context" - "fmt" - "sort" - "strconv" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - client_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" - "github.com/stretchr/testify/require" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/prometheus/prometheus/util/testutil" -) - -func TestMain(m *testing.M) { - testutil.TolerantVerifyLeak(m) -} - -func newTestMetrics(t *testing.T, reg prometheus.Registerer) (*discovery.RefreshMetricsManager, map[string]discovery.DiscovererMetrics) { - refreshMetrics := discovery.NewRefreshMetrics(reg) - sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics) - require.NoError(t, err) - return &refreshMetrics, sdMetrics -} - -// TestTargetUpdatesOrder checks that the target updates are received in the expected order. 
-func TestTargetUpdatesOrder(t *testing.T) { - // The order by which the updates are send is determined by the interval passed to the mock discovery adapter - // Final targets array is ordered alphabetically by the name of the discoverer. - // For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge. - testCases := []struct { - title string - updates map[string][]update - expectedTargets [][]*targetgroup.Group - }{ - { - title: "Single TP no updates", - updates: map[string][]update{ - "tp1": {}, - }, - expectedTargets: nil, - }, - { - title: "Multiple TPs no updates", - updates: map[string][]update{ - "tp1": {}, - "tp2": {}, - "tp3": {}, - }, - expectedTargets: nil, - }, - { - title: "Single TP empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - }, - }, - { - title: "Multiple TPs empty initials", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{}, - interval: 5 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{}, - interval: 200 * time.Millisecond, - }, - }, - "tp3": { - { - targetGroups: []targetgroup.Group{}, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - {}, - {}, - {}, - }, - }, - { - title: "Single TP initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - { - title: 
"Multiple TPs initials only", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - }, - }, - }, - { - title: "Single TP initials followed by empty updates", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - }, - }, - { - title: "Single TP initials and new groups", - updates: map[string][]update{ 
- "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 0, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - title: "Multiple TPs initials and new groups", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 500 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - { - targetGroups: 
[]targetgroup.Group{ - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - { - Source: "tp1_group3", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group4", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - { - Source: "tp2_group3", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group4", - Targets: 
[]model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - { - title: "One TP initials arrive after other TP updates.", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 150 * time.Millisecond, - }, - }, - "tp2": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - interval: 200 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - interval: 100 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "5"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "6"}}, - }, - }, - { - { - Source: 
"tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - { - Source: "tp2_group1", - Targets: []model.LabelSet{{"__instance__": "7"}}, - }, - { - Source: "tp2_group2", - Targets: []model.LabelSet{{"__instance__": "8"}}, - }, - }, - }, - }, - - { - title: "Single TP empty update in between", - updates: map[string][]update{ - "tp1": { - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - interval: 30 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - interval: 10 * time.Millisecond, - }, - { - targetGroups: []targetgroup.Group{ - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - interval: 300 * time.Millisecond, - }, - }, - }, - expectedTargets: [][]*targetgroup.Group{ - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{}, - }, - }, - { - { - Source: "tp1_group1", - Targets: []model.LabelSet{{"__instance__": "3"}}, - }, - { - Source: "tp1_group2", - Targets: []model.LabelSet{{"__instance__": "4"}}, - }, - }, - }, - }, - } - - for i, tc := range testCases { - tc := tc - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager 
:= NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - - var totalUpdatesCount int - for _, up := range tc.updates { - if len(up) > 0 { - totalUpdatesCount += len(up) - } - } - provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount) - - for _, up := range tc.updates { - go newMockDiscoveryProvider(up...).Run(ctx, provUpdates) - } - - for x := 0; x < totalUpdatesCount; x++ { - select { - case <-ctx.Done(): - t.Fatalf("%d: no update arrived within the timeout limit", x) - case tgs := <-provUpdates: - discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) - for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x]) - } - } - } - }) - } -} - -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { - t.Helper() - - // Need to sort by the groups's source as the received order is not guaranteed. - sort.Sort(byGroupSource(got)) - sort.Sort(byGroupSource(expected)) - - require.Equal(t, expected, got) -} - -func staticConfig(addrs ...string) discovery.StaticConfig { - var cfg discovery.StaticConfig - for i, addr := range addrs { - cfg = append(cfg, &targetgroup.Group{ - Source: strconv.Itoa(i), - Targets: []model.LabelSet{ - {model.AddressLabel: model.LabelValue(addr)}, - }, - }) - } - return cfg -} - -func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Group, poolKey poolKey, label string, present bool) { - t.Helper() - if _, ok := tSets[poolKey]; !ok { - t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) - } - - match := false - var mergedTargets string - for _, targetGroup := range tSets[poolKey] { - for _, l := range targetGroup.Targets { - mergedTargets = mergedTargets + " " + l.String() - if l.String() == label { - match = true - } - } - } - if match != present { - msg := "" - if !present { - msg = "not" - } - t.Fatalf("%q should %s 
be present in Targets labels: %q", label, msg, mergedTargets) - } -} - -func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", false) -} - -func TestDiscovererConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090", "bar:9090"), - staticConfig("baz:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: 
"prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"bar:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/1"}, "{__address__=\"baz:9090\"}", true) -} - -// TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. -// This is required to signal the receiver that this target set has no current targets. -func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - - c["prometheus"] = discovery.Configs{ - discovery.StaticConfig{{}}, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - - pkey := poolKey{setName: "prometheus", provider: "static/0"} - targetGroups, ok := discoveryManager.targets[pkey] - if !ok { - t.Fatalf("'%v' should be present in target groups", pkey) - } - group, ok := targetGroups[""] - if !ok { - t.Fatalf("missing '' key in target groups %v", targetGroups) - } - - if len(group.Targets) != 0 { - t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) - } -} - -func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, nil, reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - staticConfig("foo:9090"), - }, - "prometheus2": { - staticConfig("foo:9090"), - }, - } - discoveryManager.ApplyConfig(c) - - <-discoveryManager.SyncCh() - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - verifyPresence(t, discoveryManager.targets, poolKey{setName: "prometheus2", provider: "static/0"}, "{__address__=\"foo:9090\"}", true) - if len(discoveryManager.providers) != 1 { - t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) - } -} - -func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { - originalConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - processedConfig := discovery.Configs{ - staticConfig("foo:9090", "bar:9090", "baz:9090"), - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - cfgs := map[string]discovery.Configs{ - "prometheus": processedConfig, - } - discoveryManager.ApplyConfig(cfgs) - <-discoveryManager.SyncCh() - - for _, cfg := range cfgs { - require.Equal(t, originalConfig, cfg) - } -} - -type errorConfig struct{ err error } - -func (e errorConfig) Name() string { return "error" } -func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) { - return nil, e.err -} - -// 
NewDiscovererMetrics implements discovery.Config. -func (errorConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { - return &discovery.NoopDiscovererMetrics{} -} - -func TestGaugeFailedConfigs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) - require.NotNil(t, discoveryManager) - discoveryManager.updatert = 100 * time.Millisecond - go discoveryManager.Run() - - c := map[string]discovery.Configs{ - "prometheus": { - errorConfig{fmt.Errorf("tests error 0")}, - errorConfig{fmt.Errorf("tests error 1")}, - errorConfig{fmt.Errorf("tests error 2")}, - }, - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) - if failedCount != 3 { - t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) - } - - c["prometheus"] = discovery.Configs{ - staticConfig("foo:9090"), - } - discoveryManager.ApplyConfig(c) - <-discoveryManager.SyncCh() - - failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) - if failedCount != 0 { - t.Fatalf("Expected to get no failed config, got: %v", failedCount) - } -} - -func TestCoordinationWithReceiver(t *testing.T) { - updateDelay := 100 * time.Millisecond - - type expect struct { - delay time.Duration - tgs map[string][]*targetgroup.Group - } - - testCases := []struct { - title string - providers map[string]discovery.Discoverer - expected []expect - }{ - { - title: "Receiver should get all updates even when one provider closes its channel", - providers: map[string]discovery.Discoverer{ - "once1": &onceProvider{ - tgs: []*targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - "mock1": 
newMockDiscoveryProvider( - update{ - interval: 2 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - tgs: map[string][]*targetgroup.Group{ - "once1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - "mock1": { - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - { - title: "Receiver should get all updates even when the channel is blocked", - providers: map[string]discovery.Discoverer{ - "mock1": newMockDiscoveryProvider( - update{ - targetGroups: []targetgroup.Group{ - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - update{ - interval: 4 * updateDelay, - targetGroups: []targetgroup.Group{ - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - ), - }, - expected: []expect{ - { - delay: 2 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - }, - }, - }, - { - delay: 4 * updateDelay, - tgs: map[string][]*targetgroup.Group{ - "mock1": { - { - Source: "tg1", - Targets: []model.LabelSet{{"__instance__": "1"}}, - }, - { - Source: "tg2", - Targets: []model.LabelSet{{"__instance__": "2"}}, - }, - }, - }, - }, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.title, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - reg := prometheus.NewRegistry() - _, sdMetrics := newTestMetrics(t, reg) - - mgr := NewManager(ctx, nil, reg, sdMetrics) - require.NotNil(t, mgr) - mgr.updatert = updateDelay - go mgr.Run() - - for name, p := range tc.providers { - mgr.StartCustomProvider(ctx, 
name, p) - } - - for i, expected := range tc.expected { - time.Sleep(expected.delay) - select { - case <-ctx.Done(): - t.Fatalf("step %d: no update received in the expected timeframe", i) - case tgs, ok := <-mgr.SyncCh(): - if !ok { - t.Fatalf("step %d: discovery manager channel is closed", i) - } - if len(tgs) != len(expected.tgs) { - t.Fatalf("step %d: target groups mismatch, got: %d, expected: %d\ngot: %#v\nexpected: %#v", - i, len(tgs), len(expected.tgs), tgs, expected.tgs) - } - for k := range expected.tgs { - if _, ok := tgs[k]; !ok { - t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) - } - assertEqualGroups(t, tgs[k], expected.tgs[k]) - } - } - } - }) - } -} - -type update struct { - targetGroups []targetgroup.Group - interval time.Duration -} - -type mockdiscoveryProvider struct { - updates []update -} - -func newMockDiscoveryProvider(updates ...update) mockdiscoveryProvider { - tp := mockdiscoveryProvider{ - updates: updates, - } - return tp -} - -func (tp mockdiscoveryProvider) Run(ctx context.Context, upCh chan<- []*targetgroup.Group) { - for _, u := range tp.updates { - if u.interval > 0 { - select { - case <-ctx.Done(): - return - case <-time.After(u.interval): - } - } - tgs := make([]*targetgroup.Group, len(u.targetGroups)) - for i := range u.targetGroups { - tgs[i] = &u.targetGroups[i] - } - upCh <- tgs - } - <-ctx.Done() -} - -// byGroupSource implements sort.Interface so we can sort by the Source field. -type byGroupSource []*targetgroup.Group - -func (a byGroupSource) Len() int { return len(a) } -func (a byGroupSource) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byGroupSource) Less(i, j int) bool { return a[i].Source < a[j].Source } - -// onceProvider sends updates once (if any) and closes the update channel. 
-type onceProvider struct { - tgs []*targetgroup.Group -} - -func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { - if len(o.tgs) > 0 { - ch <- o.tgs - } - close(ch) -} diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go deleted file mode 100644 index 955705394d..0000000000 --- a/discovery/legacymanager/registry.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacymanager - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "sync" - - "gopkg.in/yaml.v2" - - "github.com/prometheus/prometheus/discovery" - "github.com/prometheus/prometheus/discovery/targetgroup" -) - -const ( - configFieldPrefix = "AUTO_DISCOVERY_" - staticConfigsKey = "static_configs" - staticConfigsFieldName = configFieldPrefix + staticConfigsKey -) - -var ( - configNames = make(map[string]discovery.Config) - configFieldNames = make(map[reflect.Type]string) - configFields []reflect.StructField - - configTypesMu sync.Mutex - configTypes = make(map[reflect.Type]reflect.Type) - - emptyStructType = reflect.TypeOf(struct{}{}) - configsType = reflect.TypeOf(discovery.Configs{}) -) - -// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. 
-func RegisterConfig(config discovery.Config) { - registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config) -} - -func init() { - // N.B.: static_configs is the only Config type implemented by default. - // All other types are registered at init by their implementing packages. - elemTyp := reflect.TypeOf(&targetgroup.Group{}) - registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{}) -} - -func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) { - name := config.Name() - if _, ok := configNames[name]; ok { - panic(fmt.Sprintf("discovery: Config named %q is already registered", name)) - } - configNames[name] = config - - fieldName := configFieldPrefix + yamlKey // Field must be exported. - configFieldNames[elemType] = fieldName - - // Insert fields in sorted order. - i := sort.Search(len(configFields), func(k int) bool { - return fieldName < configFields[k].Name - }) - configFields = append(configFields, reflect.StructField{}) // Add empty field at end. - copy(configFields[i+1:], configFields[i:]) // Shift fields to the right. - configFields[i] = reflect.StructField{ // Write new field in place. - Name: fieldName, - Type: reflect.SliceOf(elemType), - Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`), - } -} - -func getConfigType(out reflect.Type) reflect.Type { - configTypesMu.Lock() - defer configTypesMu.Unlock() - if typ, ok := configTypes[out]; ok { - return typ - } - // Initial exported fields map one-to-one. - var fields []reflect.StructField - for i, n := 0, out.NumField(); i < n; i++ { - switch field := out.Field(i); { - case field.PkgPath == "" && field.Type != configsType: - fields = append(fields, field) - default: - fields = append(fields, reflect.StructField{ - Name: "_" + field.Name, // Field must be unexported. - PkgPath: out.PkgPath(), - Type: emptyStructType, - }) - } - } - // Append extra config fields on the end. - fields = append(fields, configFields...) 
- typ := reflect.StructOf(fields) - configTypes[out] = typ - return typ -} - -// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs -// that have a Configs field that should be inlined. -func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { - outVal := reflect.ValueOf(out) - if outVal.Kind() != reflect.Ptr { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outVal = outVal.Elem() - if outVal.Kind() != reflect.Struct { - return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) - } - outTyp := outVal.Type() - - cfgTyp := getConfigType(outTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields (defaults) to dynamic value. - var configs *discovery.Configs - for i, n := 0, outVal.NumField(); i < n; i++ { - if outTyp.Field(i).Type == configsType { - configs = outVal.Field(i).Addr().Interface().(*discovery.Configs) - continue - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - cfgVal.Field(i).Set(outVal.Field(i)) - } - if configs == nil { - return fmt.Errorf("discovery: Configs field not found in type: %T", out) - } - - // Unmarshal into dynamic value. - if err := unmarshal(cfgPtr.Interface()); err != nil { - return replaceYAMLTypeError(err, cfgTyp, outTyp) - } - - // Copy shared fields from dynamic value. - for i, n := 0, outVal.NumField(); i < n; i++ { - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. 
- } - outVal.Field(i).Set(cfgVal.Field(i)) - } - - var err error - *configs, err = readConfigs(cfgVal, outVal.NumField()) - return err -} - -func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) { - var ( - configs discovery.Configs - targets []*targetgroup.Group - ) - for i, n := startField, structVal.NumField(); i < n; i++ { - field := structVal.Field(i) - if field.Kind() != reflect.Slice { - panic("discovery: internal error: field is not a slice") - } - for k := 0; k < field.Len(); k++ { - val := field.Index(k) - if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) { - key := configFieldNames[field.Type().Elem()] - key = strings.TrimPrefix(key, configFieldPrefix) - return nil, fmt.Errorf("empty or null section in %s", key) - } - switch c := val.Interface().(type) { - case *targetgroup.Group: - // Add index to the static config target groups for unique identification - // within scrape pool. - c.Source = strconv.Itoa(len(targets)) - // Coalesce multiple static configs into a single static config. - targets = append(targets, c) - case discovery.Config: - configs = append(configs, c) - default: - panic("discovery: internal error: slice element is not a Config") - } - } - } - if len(targets) > 0 { - configs = append(configs, discovery.StaticConfig(targets)) - } - return configs, nil -} - -// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs -// that have a Configs field that should be inlined. -func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) { - inVal := reflect.ValueOf(in) - for inVal.Kind() == reflect.Ptr { - inVal = inVal.Elem() - } - inTyp := inVal.Type() - - cfgTyp := getConfigType(inTyp) - cfgPtr := reflect.New(cfgTyp) - cfgVal := cfgPtr.Elem() - - // Copy shared fields to dynamic value. 
- var configs *discovery.Configs - for i, n := 0, inTyp.NumField(); i < n; i++ { - if inTyp.Field(i).Type == configsType { - configs = inVal.Field(i).Addr().Interface().(*discovery.Configs) - } - if cfgTyp.Field(i).PkgPath != "" { - continue // Field is unexported: ignore. - } - cfgVal.Field(i).Set(inVal.Field(i)) - } - if configs == nil { - return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in) - } - - if err := writeConfigs(cfgVal, *configs); err != nil { - return nil, err - } - - return cfgPtr.Interface(), nil -} - -func writeConfigs(structVal reflect.Value, configs discovery.Configs) error { - targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group) - for _, c := range configs { - if sc, ok := c.(discovery.StaticConfig); ok { - *targets = append(*targets, sc...) - continue - } - fieldName, ok := configFieldNames[reflect.TypeOf(c)] - if !ok { - return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c) - } - field := structVal.FieldByName(fieldName) - field.Set(reflect.Append(field, reflect.ValueOf(c))) - } - return nil -} - -func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { - var e *yaml.TypeError - if errors.As(err, &e) { - oldStr := oldTyp.String() - newStr := newTyp.String() - for i, s := range e.Errors { - e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) - } - } - return err -} diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index d637312a30..f16fcbd7b5 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -55,7 +55,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. 
Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 1a8908548c..f027ffce6c 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -47,20 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). 
If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`. - `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`. -## New service discovery manager - -`--enable-feature=new-service-discovery-manager` - -When enabled, Prometheus uses a new service discovery manager that does not -restart unchanged discoveries upon reloading. This makes reloads faster and reduces -pressure on service discoveries' sources. - -Users are encouraged to test the new service discovery manager and report any -issues upstream. - -In future releases, this new service discovery manager will become the default and -this feature flag will be ignored. - ## Prometheus agent `--enable-feature=agent` From 1e01e97151c9db59178dfbabd5e60f4b3916cb83 Mon Sep 17 00:00:00 2001 From: machine424 Date: Tue, 20 Aug 2024 14:21:42 +0200 Subject: [PATCH 0442/1397] chore: update min go version to 1.22 and update to the 1.23 toolchain enforce the go version in test_go_oldest. Signed-off-by: machine424 --- .github/workflows/ci.yml | 19 +++++++++++-------- .promu.yml | 2 +- documentation/examples/remote_storage/go.mod | 2 +- go.mod | 4 ++-- scripts/golangci-lint.yml | 2 +- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5934a3dafe..98d3d9a754 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ jobs: container: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
- image: quay.io/prometheus/golang-builder:1.22-base + image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 @@ -25,7 +25,7 @@ jobs: name: More Go tests runs-on: ubuntu-latest container: - image: quay.io/prometheus/golang-builder:1.22-base + image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 @@ -39,9 +39,12 @@ jobs: test_go_oldest: name: Go tests with previous Go version runs-on: ubuntu-latest + env: + # Enforce the Go version. + GOTOOLCHAIN: local container: # The go version in this image should be N-1 wrt test_go. - image: quay.io/prometheus/golang-builder:1.21-base + image: quay.io/prometheus/golang-builder:1.22-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - run: make build @@ -54,7 +57,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. container: - image: quay.io/prometheus/golang-builder:1.22-base + image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 @@ -77,7 +80,7 @@ jobs: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.x + go-version: 1.23.x - run: | $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"} go test $TestTargets -vet=off -v @@ -89,7 +92,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
container: - image: quay.io/prometheus/golang-builder:1.22-base + image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - run: go install ./cmd/promtool/. @@ -165,7 +168,7 @@ jobs: uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: cache: false - go-version: 1.22.x + go-version: 1.23.x - name: Run goyacc and check for diff run: make install-goyacc check-generated-parser golangci: @@ -177,7 +180,7 @@ jobs: - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.x + go-version: 1.23.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' diff --git a/.promu.yml b/.promu.yml index 0aa51d6d31..699a7fa585 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,7 +1,7 @@ go: # Whenever the Go version is updated here, # .github/workflows should also be updated. 
- version: 1.22 + version: 1.23 repository: path: github.com/prometheus/prometheus build: diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 22a292db0f..7fa830c7ca 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.21.0 +go 1.22.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 diff --git a/go.mod b/go.mod index 83f3246f35..ebb4ae9a15 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/prometheus/prometheus -go 1.21.0 +go 1.22.0 -toolchain go1.22.5 +toolchain go1.23.0 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index f4a7385bb5..a15cfc97f0 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -28,7 +28,7 @@ jobs: - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.x + go-version: 1.23.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' From a7f7e90167b1ac3bb07d1301f4f6b17e5cb17881 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 10:51:06 +0000 Subject: [PATCH 0443/1397] Bump github.com/linode/linodego from 1.38.0 to 1.40.0 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.38.0 to 1.40.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.38.0...v1.40.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..0fc834af28 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.38.0 + github.com/linode/linodego v1.40.0 github.com/miekg/dns v1.1.61 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f diff --git a/go.sum b/go.sum index 8d18e20049..56be678a9a 100644 --- a/go.sum +++ b/go.sum @@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY= -github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= +github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= +github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= From be3597c27295364d4592051bd1bbf7cb7ae51210 Mon Sep 17 00:00:00 2001 From: Julien <291750+roidelapluie@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:50:20 +0200 Subject: [PATCH 0444/1397] Update autoreload documentation: recommend writing 
files atomically (#14836) Signed-off-by: Julien --- docs/feature_flags.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 1ef9efb9b1..51c2a9b314 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -276,4 +276,5 @@ specified interval. The interval is defined by the Configuration reloads are triggered by detecting changes in the checksum of the main configuration file or any referenced files, such as rule and scrape -configurations. +configurations. To ensure consistency and avoid issues during reloads, it's +recommended to update these files atomically. From 331a8e1d6c24642b4152daf90848bb003ad9715f Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 5 Sep 2024 14:44:45 +0200 Subject: [PATCH 0445/1397] Use infinite scroll in metrics explorer for faster rendering Signed-off-by: Julius Volz --- .../query/MetricsExplorer/MetricsExplorer.tsx | 184 +++++++++--------- 1 file changed, 95 insertions(+), 89 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx index 091b3cb746..a50e913e51 100644 --- a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx +++ b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx @@ -9,6 +9,7 @@ import { IconCodePlus, IconCopy, IconZoomCode } from "@tabler/icons-react"; import LabelsExplorer from "./LabelsExplorer"; import { useDebouncedValue } from "@mantine/hooks"; import classes from "./MetricsExplorer.module.css"; +import CustomInfiniteScroll from "../../../components/CustomInfiniteScroll"; const fuz = new Fuzzy({ pre: '', @@ -83,95 +84,100 @@ const MetricsExplorer: FC = ({ } autoFocus /> - - - - Metric - Type - Help - - - - {searchMatches.map((m) => ( - - - - {debouncedFilterText === "" ? ( - m.original - ) : ( -
- )} - - { - setSelectedMetric(m.original); - }} - > - - - { - insertText(m.original); - close(); - }} - > - - - { - navigator.clipboard.writeText(m.original); - }} - > - - - - - - - {getMeta(m.original).map((meta, idx) => ( - - {meta.type} -
-
- ))} -
- - {getMeta(m.original).map((meta, idx) => ( - - {meta.help} -
-
- ))} -
- - ))} - -
+ ( + + + + Metric + Type + Help + + + + {items.map((m) => ( + + + + {debouncedFilterText === "" ? ( + m.original + ) : ( +
+ )} + + { + setSelectedMetric(m.original); + }} + > + + + { + insertText(m.original); + close(); + }} + > + + + { + navigator.clipboard.writeText(m.original); + }} + > + + + + + + + {getMeta(m.original).map((meta, idx) => ( + + {meta.type} +
+
+ ))} +
+ + {getMeta(m.original).map((meta, idx) => ( + + {meta.help} +
+
+ ))} +
+ + ))} + +
+ )} + /> ); }; From 2d9f79f59e0912bf8c2c18e1c7318eec28ed75f6 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 5 Sep 2024 14:45:09 +0200 Subject: [PATCH 0446/1397] Show label stats in tree view Signed-off-by: Julius Volz --- .../mantine-ui/src/pages/query/TreeNode.tsx | 134 +++++++++++++----- 1 file changed, 99 insertions(+), 35 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index 5e73d040c0..5be9efd849 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -7,9 +7,18 @@ import { useState, } from "react"; import ASTNode, { nodeType } from "../../promql/ast"; -import { getNodeChildren } from "../../promql/utils"; +import { escapeString, getNodeChildren } from "../../promql/utils"; import { formatNode } from "../../promql/format"; -import { Box, CSSProperties, Group, Loader, Text } from "@mantine/core"; +import { + Box, + Code, + CSSProperties, + Group, + List, + Loader, + Text, + Tooltip, +} from "@mantine/core"; import { useAPIQuery } from "../../api/api"; import { InstantQueryResult, @@ -21,8 +30,10 @@ import { IconPointFilled } from "@tabler/icons-react"; import classes from "./TreeNode.module.css"; import clsx from "clsx"; import { useId } from "@mantine/hooks"; +import { functionSignatures } from "../../promql/functionSignatures"; const nodeIndent = 20; +const maxLabels = 10; type NodeState = "waiting" | "running" | "error" | "success"; @@ -68,12 +79,12 @@ const TreeNode: FC<{ const [responseTime, setResponseTime] = useState(0); const [resultStats, setResultStats] = useState<{ numSeries: number; - labelCardinalities: Record; labelExamples: Record; + sortedLabelCards: [string, number][]; }>({ numSeries: 0, - labelCardinalities: {}, labelExamples: {}, + sortedLabelCards: [], }); const children = getNodeChildren(node); @@ -86,11 +97,24 @@ const TreeNode: FC<{ [childStates] ); + // Optimize range vector selector fetches to 
give us the info we're looking for + // more cheaply. E.g. 'foo[7w]' can be expensive to fully fetch, but wrapping it + // in 'last_over_time(foo[7w])' is cheaper and also gives us all the info we + // need (number of series and labels). + let queryNode = node; + if (queryNode.type === nodeType.matrixSelector) { + queryNode = { + type: nodeType.call, + func: functionSignatures["last_over_time"], + args: [node], + }; + } + const { data, error, isFetching } = useAPIQuery({ key: [useId()], path: "/query", params: { - query: serializeNode(node), + query: serializeNode(queryNode), }, recordResponseTime: setResponseTime, enabled: mergedChildState === "success", @@ -145,7 +169,7 @@ const TreeNode: FC<{ borderTopLeftRadius: undefined, })); } - }, [parentRef, reverse, nodeRef]); + }, [parentRef, reverse, nodeRef, setConnectorStyle]); // Update the node info state based on the query result. useEffect(() => { @@ -166,17 +190,12 @@ const TreeNode: FC<{ result.forEach((s: InstantSample | RangeSamples) => { Object.entries(s.metric).forEach(([ln, lv]) => { // TODO: If we ever want to include __name__ here again, we cannot use the - // count_over_time(foo[7d]) optimization since that removes the metric name. + // last_over_time(foo[7d]) optimization since that removes the metric name. 
if (ln !== "__name__") { - if (!labelValuesByName.hasOwnProperty(ln)) { - labelValuesByName[ln] = { [lv]: 1 }; - } else { - if (!labelValuesByName[ln].hasOwnProperty(lv)) { - labelValuesByName[ln][lv] = 1; - } else { - labelValuesByName[ln][lv]++; - } + if (!labelValuesByName[ln]) { + labelValuesByName[ln] = {}; } + labelValuesByName[ln][lv] = (labelValuesByName[ln][lv] || 0) + 1; } }); }); @@ -196,7 +215,9 @@ const TreeNode: FC<{ setResultStats({ numSeries: resultSeries, - labelCardinalities, + sortedLabelCards: Object.entries(labelCardinalities).sort( + (a, b) => b[1] - a[1] + ), labelExamples, }); }, [data]); @@ -204,7 +225,7 @@ const TreeNode: FC<{ const innerNode = ( ) : ( - - {resultStats.numSeries} result{resultStats.numSeries !== 1 && "s"} –{" "} - {responseTime}ms - {/* {resultStats.numSeries > 0 && ( - <> - {labelNames.length > 0 ? ( - labelNames.map((v, idx) => ( - - {idx !== 0 && ", "} - {v} - - )) - ) : ( - <>no labels - )} - - )} */} - + + + {resultStats.numSeries} result{resultStats.numSeries !== 1 && "s"} +   –   + {responseTime}ms   –   + + + {resultStats.sortedLabelCards + .slice(0, maxLabels) + .map(([ln, cnt]) => ( + + + {resultStats.labelExamples[ln].map( + ({ value, count }) => ( + + + {escapeString(value)} + {" "} + ({count} + x) + + ) + )} + {cnt > 5 &&
  • ...
  • } +
    + + } + > + + + {ln} + + + : {cnt} + + +
    + ))} + {resultStats.sortedLabelCards.length > maxLabels ? ( + + ...{resultStats.sortedLabelCards.length - maxLabels} more... + + ) : null} +
    +
    )}
    ); From ef96e983d93e81165f6f6c54fa40392a30547895 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:10:26 +0000 Subject: [PATCH 0447/1397] Bump the go-opentelemetry-io group across 1 directory with 6 updates Bumps the go-opentelemetry-io group with 5 updates in the / directory: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) | `1.12.0` | `1.14.1` | | [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector) | `0.105.0` | `0.108.1` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.28.0` | `1.29.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.28.0` | `1.29.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.28.0` | `1.29.0` | Updates `go.opentelemetry.io/collector/pdata` from 1.12.0 to 1.14.1 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.12.0...pdata/v1.14.1) Updates `go.opentelemetry.io/collector/semconv` from 0.105.0 to 0.108.1 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.105.0...v0.108.1) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.28.0 to 1.29.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - 
[Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...v1.29.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.28.0 to 1.29.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...v1.29.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.28.0 to 1.29.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...v1.29.0) Updates `go.opentelemetry.io/otel/sdk` from 1.28.0 to 1.29.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...v1.29.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... Signed-off-by: dependabot[bot] --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..658422a541 100644 --- a/go.mod +++ b/go.mod @@ -62,14 +62,14 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.12.0 - go.opentelemetry.io/collector/semconv v0.105.0 + go.opentelemetry.io/collector/pdata v1.14.1 + go.opentelemetry.io/collector/semconv v0.108.1 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 + go.opentelemetry.io/otel/sdk v1.29.0 go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 @@ -143,7 +143,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/cronexpr 
v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect diff --git a/go.sum b/go.sum index 8d18e20049..bc670f5c6c 100644 --- a/go.sum +++ b/go.sum @@ -350,8 +350,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= @@ -726,24 +726,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= -go.opentelemetry.io/collector/semconv v0.105.0/go.mod 
h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= +go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= +go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= +go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod 
h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= From be5ceb2eaac0df32597ff3957fda1d1cd117070b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:10:26 +0000 Subject: [PATCH 0448/1397] Bump the k8s-io group across 1 directory with 3 updates Bumps the k8s-io group with 2 updates in the / directory: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go). 
Updates `k8s.io/api` from 0.29.3 to 0.31.0 - [Commits](https://github.com/kubernetes/api/compare/v0.29.3...v0.31.0) Updates `k8s.io/apimachinery` from 0.29.3 to 0.31.0 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.3...v0.31.0) Updates `k8s.io/client-go` from 0.29.3 to 0.31.0 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.3...v0.31.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-io - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-io - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-io ... Signed-off-by: dependabot[bot] --- go.mod | 14 ++++++++------ go.sum | 40 ++++++++++++++++++++++------------------ 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..cd5940dbfa 100644 --- a/go.mod +++ b/go.mod @@ -87,9 +87,9 @@ require ( google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.29.3 - k8s.io/apimachinery v0.29.3 - k8s.io/client-go v0.29.3 + k8s.io/api v0.31.0 + k8s.io/apimachinery v0.31.0 + k8s.io/client-go v0.31.0 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.130.1 ) @@ -115,9 +115,9 @@ require ( github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -182,6 +182,7 @@ 
require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect @@ -193,14 +194,15 @@ require ( golang.org/x/net v0.28.0 // indirect golang.org/x/term v0.23.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( diff --git a/go.sum b/go.sum index 8d18e20049..97a6a9f057 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,6 @@ github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go. 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -191,6 +189,8 @@ github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -236,8 +236,8 @@ github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= @@ -553,11 +553,11 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -705,6 +705,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -1133,6 +1135,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1166,16 +1170,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools 
v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1184,6 +1188,6 @@ sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h6 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= From e7518170b5a2cc65597382f86a7727f469917c50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:10:28 +0000 Subject: [PATCH 0449/1397] Bump golang.org/x/tools from 0.23.0 to 0.24.0 Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.23.0 to 0.24.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..4e8b8d165a 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( golang.org/x/sys v0.25.0 golang.org/x/text v0.17.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.23.0 + golang.org/x/tools v0.24.0 google.golang.org/api v0.196.0 google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed google.golang.org/grpc v1.66.0 @@ -189,7 +189,7 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect - golang.org/x/mod v0.19.0 // indirect + golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/term v0.23.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/go.sum b/go.sum index 8d18e20049..3e043d4d2a 100644 --- a/go.sum +++ b/go.sum @@ -812,8 +812,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1029,8 +1029,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 2c016e3edf7ed164dabc365c81552e314b2e342f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:10:50 +0000 Subject: [PATCH 0450/1397] Bump github.com/scaleway/scaleway-sdk-go Bumps [github.com/scaleway/scaleway-sdk-go](https://github.com/scaleway/scaleway-sdk-go) from 1.0.0-beta.29 to 1.0.0-beta.30. - [Release notes](https://github.com/scaleway/scaleway-sdk-go/releases) - [Changelog](https://github.com/scaleway/scaleway-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/scaleway/scaleway-sdk-go/compare/v1.0.0-beta.29...v1.0.0-beta.30) --- updated-dependencies: - dependency-name: github.com/scaleway/scaleway-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..4a39f24a52 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.11.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 diff --git a/go.sum b/go.sum index 8d18e20049..896f4426af 100644 --- a/go.sum +++ b/go.sum @@ -652,8 +652,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= From 7c1a423b97c7eb20ea4ccfd7ba3891090a6661b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: 
Thu, 5 Sep 2024 13:10:57 +0000 Subject: [PATCH 0451/1397] Bump github.com/prometheus/client_golang from 1.20.2 to 1.20.3 Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.20.2 to 1.20.3. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/v1.20.3/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.20.2...v1.20.3) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..3d3a6cf61d 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.20.2 + github.com/prometheus/client_golang v1.20.3 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.58.0 github.com/prometheus/common/assets v0.2.0 diff --git a/go.sum b/go.sum index 8d18e20049..4db0da48c5 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= +github.com/prometheus/client_golang 
v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From 8645398260319d9f1ad12acb2b88734cc082d76f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:11:01 +0000 Subject: [PATCH 0452/1397] Bump golang.org/x/oauth2 from 0.22.0 to 0.23.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.22.0 to 0.23.0. - [Commits](https://github.com/golang/oauth2/compare/v0.22.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..c5148cbaf0 100644 --- a/go.mod +++ b/go.mod @@ -75,7 +75,7 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/oauth2 v0.22.0 + golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 golang.org/x/text v0.17.0 diff --git a/go.sum b/go.sum index 8d18e20049..2d9cead04f 100644 --- a/go.sum +++ b/go.sum @@ -867,8 +867,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 2d534290ee47665fe06eb01f264704020ddda3a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:11:12 +0000 Subject: [PATCH 0453/1397] Bump github.com/envoyproxy/go-control-plane Bumps [github.com/envoyproxy/go-control-plane](https://github.com/envoyproxy/go-control-plane) from 0.12.1-0.20240621013728-1eb8caab5155 to 0.13.0. - [Release notes](https://github.com/envoyproxy/go-control-plane/releases) - [Changelog](https://github.com/envoyproxy/go-control-plane/blob/main/CHANGELOG.md) - [Commits](https://github.com/envoyproxy/go-control-plane/commits/v0.13.0) --- updated-dependencies: - dependency-name: github.com/envoyproxy/go-control-plane dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ebb4ae9a15..a967bbedf5 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/digitalocean/godo v1.121.0 github.com/docker/docker v27.1.1+incompatible github.com/edsrzf/mmap-go v1.1.0 - github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 + github.com/envoyproxy/go-control-plane v0.13.0 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.7.0 diff --git a/go.sum b/go.sum index 8d18e20049..74396a9949 100644 --- a/go.sum +++ b/go.sum @@ -168,8 +168,8 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 h1:IgJPqnrlY2Mr4pYB6oaMKvFvwJ9H+X6CCY5x1vCTcpc= -github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= From cc40b65ab4f68f47315b9512b8c2c4687465cee6 Mon Sep 17 00:00:00 2001 From: 
machine424 Date: Wed, 7 Aug 2024 16:53:47 +0200 Subject: [PATCH 0454/1397] fix(promtool): use the final database path for --sandbox-dir-root instead of the default value as it may be overridden add a regression test for that. Signed-off-by: machine424 --- cmd/promtool/main.go | 4 +-- cmd/promtool/main_test.go | 44 +++++++++++++++++++++++++++ cmd/promtool/tsdb_test.go | 57 ++++++++++++++++++++++++++--------- docs/command-line/promtool.md | 4 +-- 4 files changed, 90 insertions(+), 19 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index e713a177fd..d1b6c0fcd9 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -236,14 +236,14 @@ func main() { tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.") dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() - dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() + dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String() dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.") dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() - dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String() + dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String() dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 78500fe937..2b086851f3 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -35,6 +35,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/promql/promqltest" ) var promtoolPath = os.Args[0] @@ -549,3 +550,46 @@ func TestCheckRulesWithRuleFiles(t *testing.T) { require.Equal(t, lintErrExitCode, exitCode, "") }) } + +func TestTSDBDumpCommand(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + storage := promqltest.LoadedStorage(t, ` + load 1m + metric{foo="bar"} 1 2 3 + `) + t.Cleanup(func() { storage.Close() }) + + for _, c := range []struct { + name string + subCmd string + sandboxDirRoot string + }{ + { + name: "dump", + subCmd: "dump", + }, + { + name: "dump with sandbox dir root", + subCmd: "dump", + sandboxDirRoot: t.TempDir(), + }, + { + name: "dump-openmetrics", + subCmd: "dump-openmetrics", + }, + { + name: "dump-openmetrics with sandbox dir root", + subCmd: "dump-openmetrics", + sandboxDirRoot: t.TempDir(), + }, + } { + t.Run(c.name, func(t *testing.T) { + args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()} + cmd := exec.Command(promtoolPath, args...) + require.NoError(t, cmd.Run()) + }) + } +} diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index ffc5467b47..90192e31ab 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -55,7 +55,7 @@ func TestGenerateBucket(t *testing.T) { } // getDumpedSamples dumps samples and returns them. 
-func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string { +func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string { t.Helper() oldStdout := os.Stdout @@ -64,8 +64,8 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin err := dumpSamples( context.Background(), - path, - t.TempDir(), + databasePath, + sandboxDirRoot, mint, maxt, match, @@ -96,13 +96,15 @@ func TestTSDBDump(t *testing.T) { heavy_metric{foo="bar"} 5 4 3 2 1 heavy_metric{foo="foo"} 5 4 3 2 1 `) + t.Cleanup(func() { storage.Close() }) tests := []struct { - name string - mint int64 - maxt int64 - match []string - expectedDump string + name string + mint int64 + maxt int64 + sandboxDirRoot string + match []string + expectedDump string }{ { name: "default match", @@ -111,6 +113,14 @@ func TestTSDBDump(t *testing.T) { match: []string{"{__name__=~'(?s:.*)'}"}, expectedDump: "testdata/dump-test-1.prom", }, + { + name: "default match with sandbox dir root set", + mint: math.MinInt64, + maxt: math.MaxInt64, + sandboxDirRoot: t.TempDir(), + match: []string{"{__name__=~'(?s:.*)'}"}, + expectedDump: "testdata/dump-test-1.prom", + }, { name: "same matcher twice", mint: math.MinInt64, @@ -149,7 +159,7 @@ func TestTSDBDump(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet) + dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSet) expectedMetrics, err := os.ReadFile(tt.expectedDump) require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) @@ -171,12 +181,29 @@ func TestTSDBDumpOpenMetrics(t *testing.T) { my_counter{foo="bar", baz="abc"} 1 2 3 4 5 my_gauge{bar="foo", abc="baz"} 9 8 0 4 7 `) + t.Cleanup(func() { storage.Close() }) - 
expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom") - require.NoError(t, err) - expectedMetrics = normalizeNewLine(expectedMetrics) - dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) - require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) + tests := []struct { + name string + sandboxDirRoot string + }{ + { + name: "default match", + }, + { + name: "default match with sandbox dir root set", + sandboxDirRoot: t.TempDir(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom") + require.NoError(t, err) + expectedMetrics = normalizeNewLine(expectedMetrics) + dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) + }) + } } func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { @@ -195,7 +222,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { }) // Dump the blocks into OM format - dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) // Should get back the initial metrics. require.Equal(t, string(initialMetrics), dumpedMetrics) diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 6d74200e65..e48cede79c 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -575,7 +575,7 @@ Dump samples from a TSDB. 
| Flag | Description | Default | | --- | --- | --- | -| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | +| --sandbox-dir-root | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | | --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -602,7 +602,7 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | -| --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | +| --sandbox-dir-root | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | | --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | From 05d89b7ffd192edf987b7598aa42000251350bbc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:48:32 +0000 Subject: [PATCH 0455/1397] Bump github.com/prometheus/common from 0.58.0 to 0.59.0 Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.58.0 to 0.59.0. 
- [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.58.0...v0.59.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3d3a6cf61d..dd366c0d5b 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.3 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.58.0 + github.com/prometheus/common v0.59.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.11.0 diff --git a/go.sum b/go.sum index 4db0da48c5..acdf0536ff 100644 --- a/go.sum +++ b/go.sum @@ -627,8 +627,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo= -github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.59.0 h1:o2eVCLbhf3sgisnD5e0/twUt25r8gNmMHNzTOAuY9bs= +github.com/prometheus/common v0.59.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 
h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= From c1c2e32137feed7b0d40598b88d5b7b00741b920 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 5 Sep 2024 16:19:12 +0200 Subject: [PATCH 0456/1397] More cleanups to tree view stats Signed-off-by: Julius Volz --- .../mantine-ui/src/pages/query/TreeNode.tsx | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index 5be9efd849..f48bda3278 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -33,7 +33,8 @@ import { useId } from "@mantine/hooks"; import { functionSignatures } from "../../promql/functionSignatures"; const nodeIndent = 20; -const maxLabels = 10; +const maxLabelNames = 10; +const maxLabelValues = 10; type NodeState = "waiting" | "running" | "error" | "success"; @@ -209,7 +210,7 @@ const TreeNode: FC<{ // Sort label values by their number of occurrences within this label name. labelExamples[ln] = Object.entries(lvs) .sort(([, aCnt], [, bCnt]) => bCnt - aCnt) - .slice(0, 5) + .slice(0, maxLabelValues) .map(([lv, cnt]) => ({ value: lv, count: cnt })); }); @@ -285,11 +286,14 @@ const TreeNode: FC<{ {resultStats.numSeries} result{resultStats.numSeries !== 1 && "s"}   –   - {responseTime}ms   –   + {responseTime}ms + {resultStats.sortedLabelCards.length > 0 && ( + <>  –   + )} {resultStats.sortedLabelCards - .slice(0, maxLabels) + .slice(0, maxLabelNames) .map(([ln, cnt]) => ( ( - + {escapeString(value)} {" "} ({count} @@ -310,7 +314,7 @@ const TreeNode: FC<{ ) )} - {cnt > 5 &&
  • ...
  • } + {cnt > maxLabelValues &&
  • ...
  • } } @@ -330,14 +334,14 @@ const TreeNode: FC<{
    ))} - {resultStats.sortedLabelCards.length > maxLabels ? ( + {resultStats.sortedLabelCards.length > maxLabelNames ? ( - ...{resultStats.sortedLabelCards.length - maxLabels} more... + ...{resultStats.sortedLabelCards.length - maxLabelNames} more... ) : null}
    From 536d9f9ce989b670974e94b5db12f7bee0276e8e Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 5 Sep 2024 18:17:42 +0200 Subject: [PATCH 0457/1397] BUGFIX: TSDB: panic in query during truncation with OOO head (#14831) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check if headQuerier is nil before trying to use it. * TestQueryOOOHeadDuringTruncate: unit test to check query during truncate Regression test for #14822 * Simulate race between query and Compact() Signed-off-by: György Krajcsovits --- tsdb/head.go | 5 +++ tsdb/head_test.go | 87 +++++++++++++++++++++++++++++++++++++++++++ tsdb/ooo_head_read.go | 11 +++++- 3 files changed, 102 insertions(+), 1 deletion(-) diff --git a/tsdb/head.go b/tsdb/head.go index b7bfaa0fda..4ff7aab632 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -128,6 +128,7 @@ type Head struct { writeNotified wlog.WriteNotified memTruncationInProcess atomic.Bool + memTruncationCallBack func() // For testing purposes. } type ExemplarStorage interface { @@ -1129,6 +1130,10 @@ func (h *Head) truncateMemory(mint int64) (err error) { h.memTruncationInProcess.Store(true) defer h.memTruncationInProcess.Store(false) + if h.memTruncationCallBack != nil { + h.memTruncationCallBack() + } + // We wait for pending queries to end that overlap with this truncation. if initialized { h.WaitForPendingReadersInTimeRange(h.MinTime(), mint) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 18ec4f0aca..c338ddaddc 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3492,6 +3492,93 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) { } } +func TestQueryOOOHeadDuringTruncate(t *testing.T) { + const maxT int64 = 6000 + + dir := t.TempDir() + opts := DefaultOptions() + opts.EnableNativeHistograms = true + opts.OutOfOrderTimeWindow = maxT + opts.MinBlockDuration = maxT / 2 // So that head will compact up to 3000. 
+ + db, err := Open(dir, nil, nil, opts, nil) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + db.DisableCompactions() + + var ( + ref = storage.SeriesRef(0) + app = db.Appender(context.Background()) + ) + // Add in-order samples at every 100ms starting at 0ms. + for i := int64(0); i < maxT; i += 100 { + _, err := app.Append(ref, labels.FromStrings("a", "b"), i, 0) + require.NoError(t, err) + } + // Add out-of-order samples at every 100ms starting at 50ms. + for i := int64(50); i < maxT; i += 100 { + _, err := app.Append(ref, labels.FromStrings("a", "b"), i, 0) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + requireEqualOOOSamples(t, int(maxT/100-1), db) + + // Synchronization points. + allowQueryToStart := make(chan struct{}) + queryStarted := make(chan struct{}) + compactionFinished := make(chan struct{}) + + db.head.memTruncationCallBack = func() { + // Compaction has started, let the query start and wait for it to actually start to simulate race condition. + allowQueryToStart <- struct{}{} + <-queryStarted + } + + go func() { + db.Compact(context.Background()) // Compact and write blocks up to 3000 (maxtT/2). + compactionFinished <- struct{}{} + }() + + // Wait for the compaction to start. + <-allowQueryToStart + + q, err := db.Querier(1500, 2500) + require.NoError(t, err) + queryStarted <- struct{}{} // Unblock the compaction. + ctx := context.Background() + + // Label names. + res, annots, err := q.LabelNames(ctx, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.NoError(t, err) + require.Empty(t, annots) + require.Equal(t, []string{"a"}, res) + + // Label values. 
+ res, annots, err = q.LabelValues(ctx, "a", nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.NoError(t, err) + require.Empty(t, annots) + require.Equal(t, []string{"b"}, res) + + // Samples + ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) // One series. + it := s.Iterator(nil) + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, int64(1500), it.AtT()) // It is an in-order sample. + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, int64(1550), it.AtT()) // it is an out-of-order sample. + require.NoError(t, it.Err()) + + require.NoError(t, q.Close()) // Cannot be deferred as the compaction waits for queries to close before finishing. + + <-compactionFinished // Wait for compaction otherwise Go test finds stray goroutines. +} + func TestAppendHistogram(t *testing.T) { l := labels.FromStrings("a", "b") for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} { diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 7b58ec566f..a3c959bc43 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -513,7 +513,7 @@ type HeadAndOOOQuerier struct { head *Head index IndexReader chunkr ChunkReader - querier storage.Querier + querier storage.Querier // Used for LabelNames, LabelValues, but may be nil if head was truncated in the mean time, in which case we ignore it and not close it in the end. 
} func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { @@ -534,15 +534,24 @@ func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolatio } func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + if q.querier == nil { + return nil, nil, nil + } return q.querier.LabelValues(ctx, name, hints, matchers...) } func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + if q.querier == nil { + return nil, nil, nil + } return q.querier.LabelNames(ctx, hints, matchers...) } func (q *HeadAndOOOQuerier) Close() error { q.chunkr.Close() + if q.querier == nil { + return nil + } return q.querier.Close() } From c2593ab76be81ab66c130c87fbad08d20271e039 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 09:14:16 +0000 Subject: [PATCH 0458/1397] Bump github.com/prometheus/exporter-toolkit from 0.11.0 to 0.12.0 Bumps [github.com/prometheus/exporter-toolkit](https://github.com/prometheus/exporter-toolkit) from 0.11.0 to 0.12.0. - [Release notes](https://github.com/prometheus/exporter-toolkit/releases) - [Changelog](https://github.com/prometheus/exporter-toolkit/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/exporter-toolkit/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: github.com/prometheus/exporter-toolkit dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 +++- go.sum | 8 ++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 90f308e770..cf1b5fdd54 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( github.com/prometheus/common v0.59.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.11.0 + github.com/prometheus/exporter-toolkit v0.12.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 @@ -163,6 +163,8 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mdlayher/vsock v1.2.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect diff --git a/go.sum b/go.sum index 9d5722280e..a8a027f1ea 100644 --- a/go.sum +++ b/go.sum @@ -497,6 +497,10 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod 
h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= @@ -633,8 +637,8 @@ github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/ github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= -github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= +github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo= +github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= From 9425c0a5632d5608ab4a265a7756c97d6d45b134 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 09:14:51 +0000 Subject: [PATCH 0459/1397] Bump github.com/digitalocean/godo from 1.121.0 to 1.122.0 Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.121.0 to 1.122.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.121.0...v1.122.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 90f308e770..04b5cd8ed9 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.121.0 + github.com/digitalocean/godo v1.122.0 github.com/docker/docker v27.1.1+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.13.0 diff --git a/go.sum b/go.sum index 9d5722280e..ecb63344de 100644 --- a/go.sum +++ b/go.sum @@ -143,8 +143,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.121.0 h1:ilXiHuEnhbJs2fmFEPX0r/QQ6KfiOIMAhJN3f8NiCfI= -github.com/digitalocean/godo v1.121.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= +github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= From 2036177e119f5e633f86143d664cf672cc98cafe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 09:15:01 +0000 Subject: [PATCH 0460/1397] Bump golang.org/x/text from 0.17.0 to 0.18.0 Bumps [golang.org/x/text](https://github.com/golang/text) from 0.17.0 to 0.18.0. 
- [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.17.0...v0.18.0) --- updated-dependencies: - dependency-name: golang.org/x/text dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 90f308e770..f6a4eb3f80 100644 --- a/go.mod +++ b/go.mod @@ -78,7 +78,7 @@ require ( golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.25.0 - golang.org/x/text v0.17.0 + golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 google.golang.org/api v0.196.0 diff --git a/go.sum b/go.sum index 9d5722280e..efcc2b15f2 100644 --- a/go.sum +++ b/go.sum @@ -970,8 +970,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 48746b1a6c10ce18caf3d00cc876436707784e89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:04:00 +0000 Subject: [PATCH 
0461/1397] Bump github.com/prometheus/common from 0.58.0 to 0.59.1 Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.58.0 to 0.59.1. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.58.0...v0.59.1) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cf1b5fdd54..0dff6df830 100644 --- a/go.mod +++ b/go.mod @@ -54,7 +54,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.3 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.59.0 + github.com/prometheus/common v0.59.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.12.0 diff --git a/go.sum b/go.sum index a8a027f1ea..f8c0cb6308 100644 --- a/go.sum +++ b/go.sum @@ -631,8 +631,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.59.0 h1:o2eVCLbhf3sgisnD5e0/twUt25r8gNmMHNzTOAuY9bs= -github.com/prometheus/common v0.59.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common/assets v0.2.0 
h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= From ec73d8dd29c18f6fe2972e6e168c39e527430a1d Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Fri, 9 Aug 2024 11:12:10 +0200 Subject: [PATCH 0462/1397] promqltest: run each eval command in a subtest Simpler approach to #14576 Signed-off-by: Julien Pivotto Signed-off-by: Julien --- promql/promqltest/test.go | 43 ++++++++++++++++++++++++++-------- promql/promqltest/test_test.go | 2 +- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index ff709e4426..0c48a755f8 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -65,7 +65,7 @@ var testStartTime = time.Unix(0, 0).UTC() // LoadedStorage returns storage with generated data using the provided load statements. // Non-load statements will cause test errors. func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage { - test, err := newTest(t, input) + test, err := newTest(t, input, false) require.NoError(t, err) for _, cmd := range test.cmds { @@ -124,11 +124,17 @@ func RunBuiltinTests(t TBRun, engine promql.QueryEngine) { // RunTest parses and runs the test against the provided engine. func RunTest(t testutil.T, input string, engine promql.QueryEngine) { - require.NoError(t, runTest(t, input, engine)) + require.NoError(t, runTest(t, input, engine, false)) } -func runTest(t testutil.T, input string, engine promql.QueryEngine) error { - test, err := newTest(t, input) +// testTest allows tests to be run in "test-the-test" mode (true for +// testingMode). This is a special mode for testing test code execution itself. 
+func testTest(t testutil.T, input string, engine promql.QueryEngine) error { + return runTest(t, input, engine, true) +} + +func runTest(t testutil.T, input string, engine promql.QueryEngine, testingMode bool) error { + test, err := newTest(t, input, testingMode) // Why do this before checking err? newTest() can create the test storage and then return an error, // and we want to make sure to clean that up to avoid leaking goroutines. @@ -163,6 +169,8 @@ func runTest(t testutil.T, input string, engine promql.QueryEngine) error { // against a test storage. type test struct { testutil.T + // testingMode distinguishes between normal execution and test-execution mode. + testingMode bool cmds []testCommand @@ -173,10 +181,11 @@ type test struct { } // newTest returns an initialized empty Test. -func newTest(t testutil.T, input string) (*test, error) { +func newTest(t testutil.T, input string, testingMode bool) (*test, error) { test := &test{ - T: t, - cmds: []testCommand{}, + T: t, + cmds: []testCommand{}, + testingMode: testingMode, } err := test.parse(input) test.clear() @@ -1129,11 +1138,25 @@ func (t *test) exec(tc testCommand, engine promql.QueryEngine) error { } func (t *test) execEval(cmd *evalCmd, engine promql.QueryEngine) error { - if cmd.isRange { - return t.execRangeEval(cmd, engine) + do := func() error { + if cmd.isRange { + return t.execRangeEval(cmd, engine) + } + + return t.execInstantEval(cmd, engine) } - return t.execInstantEval(cmd, engine) + if t.testingMode { + return do() + } + + if tt, ok := t.T.(*testing.T); ok { + tt.Run(fmt.Sprintf("line %d/%s", cmd.line, cmd.expr), func(t *testing.T) { + require.NoError(t, do()) + }) + return nil + } + return errors.New("t.T is not testing.T") } func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 5aff71fb14..ef49a7d1f6 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ 
-595,7 +595,7 @@ eval range from 0 to 5m step 5m testmetric for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - err := runTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery)) + err := testTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery)) if testCase.expectedError == "" { require.NoError(t, err) From 0dfe3264351601ee1a8f5c65b4af6bb8aadd38df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 11:19:51 +0000 Subject: [PATCH 0463/1397] Bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.1.1+incompatible to 27.2.0+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v27.1.1...v27.2.0) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 210a484401..ebf873ea35 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.122.0 - github.com/docker/docker v27.1.1+incompatible + github.com/docker/docker v27.2.0+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.13.0 github.com/envoyproxy/protoc-gen-validate v1.1.0 diff --git a/go.sum b/go.sum index 2559014b7d..024088a4d4 100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= +github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= From 92004f18d0e976399d14298f6c0b52856ea2f622 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 11:20:18 +0000 Subject: [PATCH 0464/1397] Bump github.com/miekg/dns from 1.1.61 to 1.1.62 Bumps [github.com/miekg/dns](https://github.com/miekg/dns) 
from 1.1.61 to 1.1.62. - [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.61...v1.1.62) --- updated-dependencies: - dependency-name: github.com/miekg/dns dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 210a484401..b44f123131 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.40.0 - github.com/miekg/dns v1.1.61 + github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 diff --git a/go.sum b/go.sum index 2559014b7d..53a67b2c18 100644 --- a/go.sum +++ b/go.sum @@ -504,8 +504,8 @@ github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnE github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= From 
88bb05c3e8d84de147f219f6e4c891d8ba87edc2 Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Wed, 21 Aug 2024 10:38:27 -0400 Subject: [PATCH 0465/1397] utf8: enable utf-8 support by default This change causes Prometheus to allow all UTF-8 characters in metric and label names. This means that names that were previously invalid and would have been previously rejected will be allowed through. Signed-off-by: Owen Williams --- cmd/prometheus/main.go | 5 ++-- cmd/prometheus/main_test.go | 5 ++++ cmd/promtool/main.go | 5 ++++ cmd/promtool/main_test.go | 6 +++++ config/config.go | 6 ++--- config/config_test.go | 23 ++++++++++++++++--- config/testdata/jobname_dup.bad.yml | 2 ++ config/testdata/lowercase.bad.yml | 2 ++ .../scrape_config_global_validation_mode.yml | 2 +- ...pe_config_local_global_validation_mode.yml | 4 ++-- .../scrape_config_local_validation_mode.yml | 2 +- docs/configuration/configuration.md | 14 +++++------ docs/feature_flags.md | 7 ------ scrape/manager_test.go | 5 ++++ scrape/scrape.go | 12 +++++----- scrape/scrape_test.go | 2 ++ 16 files changed, 69 insertions(+), 33 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index c0bd136fa8..288d34f0a4 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -103,6 +103,8 @@ var ( ) func init() { + // This can be removed when the default validation scheme in common is updated. 
+ model.NameValidationScheme = model.UTF8Validation prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_"))) var err error @@ -237,9 +239,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "promql-delayed-name-removal": c.promqlEnableDelayedNameRemoval = true level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.") - case "utf8-names": - model.NameValidationScheme = model.UTF8Validation - level.Info(logger).Log("msg", "Experimental UTF-8 support enabled") case "": continue default: diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index bc16f26d73..c16864cb8c 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -42,6 +42,11 @@ import ( "github.com/prometheus/prometheus/rules" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + const startupTime = 10 * time.Second var ( diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index e4fcda3680..b723890c5b 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -62,6 +62,11 @@ import ( "github.com/prometheus/prometheus/util/documentcli" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + const ( successExitCode = 0 failureExitCode = 1 diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 78500fe937..f74b326511 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -31,12 +31,18 @@ import ( "testing" "time" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" ) +func init() { + // This can be removed when the default validation scheme in common is updated. 
+ model.NameValidationScheme = model.UTF8Validation +} + var promtoolPath = os.Args[0] func TestMain(m *testing.M) { diff --git a/config/config.go b/config/config.go index c9e8efbf3e..a80ab331c3 100644 --- a/config/config.go +++ b/config/config.go @@ -774,10 +774,10 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { } switch globalConfig.MetricNameValidationScheme { - case "", LegacyValidationConfig: - case UTF8ValidationConfig: + case LegacyValidationConfig: + case "", UTF8ValidationConfig: if model.NameValidationScheme != model.UTF8Validation { - return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names") + panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8") } default: return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) diff --git a/config/config_test.go b/config/config_test.go index 2219061823..09fe4187b5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -62,6 +62,11 @@ import ( "github.com/prometheus/prometheus/util/testutil" ) +func init() { + // This can be removed when the default validation scheme in common is updated. 
+ model.NameValidationScheme = model.UTF8Validation +} + func mustParseURL(u string) *config.URL { parsed, err := url.Parse(u) if err != nil { @@ -2042,6 +2047,10 @@ var expectedErrors = []struct { } func TestBadConfigs(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() for _, ee := range expectedErrors { _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) require.Error(t, err, "%s", ee.filename) @@ -2051,6 +2060,10 @@ func TestBadConfigs(t *testing.T) { } func TestBadStaticConfigsJSON(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() content, err := os.ReadFile("testdata/static_config.bad.json") require.NoError(t, err) var tg targetgroup.Group @@ -2059,6 +2072,10 @@ func TestBadStaticConfigsJSON(t *testing.T) { } func TestBadStaticConfigsYML(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = model.UTF8Validation + }() content, err := os.ReadFile("testdata/static_config.bad.yml") require.NoError(t, err) var tg targetgroup.Group @@ -2323,17 +2340,17 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) { { name: "global setting implies local settings", inputFile: "scrape_config_global_validation_mode", - expectScheme: "utf8", + expectScheme: "legacy", }, { name: "local setting", inputFile: "scrape_config_local_validation_mode", - expectScheme: "utf8", + expectScheme: "legacy", }, { name: "local setting overrides global setting", inputFile: "scrape_config_local_global_validation_mode", - expectScheme: "legacy", + expectScheme: "utf8", }, } diff --git a/config/testdata/jobname_dup.bad.yml b/config/testdata/jobname_dup.bad.yml index 0265493c30..d03cb0cf97 100644 --- a/config/testdata/jobname_dup.bad.yml +++ b/config/testdata/jobname_dup.bad.yml @@ -1,4 +1,6 @@ # Two scrape configs with 
the same job names are not allowed. +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus - job_name: service-x diff --git a/config/testdata/lowercase.bad.yml b/config/testdata/lowercase.bad.yml index 9bc9583341..6dd72e6476 100644 --- a/config/testdata/lowercase.bad.yml +++ b/config/testdata/lowercase.bad.yml @@ -1,3 +1,5 @@ +global: + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus relabel_configs: diff --git a/config/testdata/scrape_config_global_validation_mode.yml b/config/testdata/scrape_config_global_validation_mode.yml index 1548554397..e9b0618c70 100644 --- a/config/testdata/scrape_config_global_validation_mode.yml +++ b/config/testdata/scrape_config_global_validation_mode.yml @@ -1,4 +1,4 @@ global: - metric_name_validation_scheme: utf8 + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus diff --git a/config/testdata/scrape_config_local_global_validation_mode.yml b/config/testdata/scrape_config_local_global_validation_mode.yml index d13605e21d..30b54834a5 100644 --- a/config/testdata/scrape_config_local_global_validation_mode.yml +++ b/config/testdata/scrape_config_local_global_validation_mode.yml @@ -1,5 +1,5 @@ global: - metric_name_validation_scheme: utf8 + metric_name_validation_scheme: legacy scrape_configs: - job_name: prometheus - metric_name_validation_scheme: legacy + metric_name_validation_scheme: utf8 diff --git a/config/testdata/scrape_config_local_validation_mode.yml b/config/testdata/scrape_config_local_validation_mode.yml index fad4235806..90279ff081 100644 --- a/config/testdata/scrape_config_local_validation_mode.yml +++ b/config/testdata/scrape_config_local_validation_mode.yml @@ -1,3 +1,3 @@ scrape_configs: - job_name: prometheus - metric_name_validation_scheme: utf8 + metric_name_validation_scheme: legacy diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 61d233b19b..64a11f1c34 100644 --- 
a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -122,9 +122,9 @@ global: [ keep_dropped_targets: | default = 0 ] # Specifies the validation scheme for metric and label names. Either blank or - # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full - # UTF-8 support. - [ metric_name_validation_scheme | default "legacy" ] + # "utf8" for for full UTF-8 support, or "legacy" for letters, numbers, colons, + # and underscores. + [ metric_name_validation_scheme | default "utf8" ] runtime: # Configure the Go garbage collector GOGC parameter @@ -477,10 +477,10 @@ metric_relabel_configs: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] -# Specifies the validation scheme for metric and label names. Either blank or -# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full -# UTF-8 support. -[ metric_name_validation_scheme | default "legacy" ] +# Specifies the validation scheme for metric and label names. Either blank or +# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and +# underscores. +[ metric_name_validation_scheme | default "utf8" ] # Limit on total number of positive and negative buckets allowed in a single # native histogram. The resolution of a histogram with more buckets will be diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 1a8908548c..d351f26786 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -250,10 +250,3 @@ When enabled, Prometheus will change the way in which the `__name__` label is re This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label. 
-## UTF-8 Name Support - -`--enable-feature=utf8-names` - -When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set. -By itself, this flag does not enable the request of UTF-8 names via content negotiation. -Users will also have to set `metric_name_validation_scheme` in scrape configs to enable the feature either on the global config or on a per-scrape config basis. diff --git a/scrape/manager_test.go b/scrape/manager_test.go index c71691c95d..0b24a86ee6 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -45,6 +45,11 @@ import ( "github.com/prometheus/prometheus/util/testutil" ) +func init() { + // This can be removed when the default validation scheme in common is updated. + model.NameValidationScheme = model.UTF8Validation +} + func TestPopulateLabels(t *testing.T) { cases := []struct { in labels.Labels diff --git a/scrape/scrape.go b/scrape/scrape.go index 2abd4691d6..d407e94bfe 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -305,9 +305,9 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { mrc = sp.config.MetricRelabelConfigs ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -460,9 +460,9 @@ func (sp *scrapePool) sync(targets []*Target) { scrapeClassicHistograms = sp.config.ScrapeClassicHistograms ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() diff --git 
a/scrape/scrape_test.go b/scrape/scrape_test.go index b703f21d46..76a418bda5 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1040,6 +1040,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { } func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { + model.NameValidationScheme = model.LegacyValidation s := teststorage.New(t) defer s.Close() ctx, cancel := context.WithCancel(context.Background()) @@ -3768,6 +3769,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec // Create a scrape loop with the HTTP server as the target. configStr := fmt.Sprintf(` global: + metric_name_validation_scheme: legacy scrape_interval: 1s scrape_timeout: 1s scrape_configs: From edc83ed164011ddd0c0c62fb2933dc057637441d Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Fri, 6 Sep 2024 14:50:10 +0200 Subject: [PATCH 0466/1397] Update storage.md with the latest around Remote Write 2.0 (#14848) * Update storage.md with the latest around Remote Write 2.0 Signed-off-by: Bartlomiej Plotka * Update storage.md Signed-off-by: Bartlomiej Plotka --------- Signed-off-by: Bartlomiej Plotka --- docs/storage.md | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/docs/storage.md b/docs/storage.md index 55d4309d37..8f5f42b8c6 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -157,31 +157,27 @@ a set of interfaces that allow integrating with remote storage systems. ### Overview -Prometheus integrates with remote storage systems in three ways: +Prometheus integrates with remote storage systems in four ways: -- Prometheus can write samples that it ingests to a remote URL in a standardized format. -- Prometheus can receive samples from other Prometheus servers in a standardized format. -- Prometheus can read (back) sample data from a remote URL in a standardized format. 
+- Prometheus can write samples that it ingests to a remote URL in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +- Prometheus can receive samples from other clients in a [Remote Write format](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +- Prometheus can read (back) sample data from a remote URL in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31). +- Prometheus can return sample data requested by clients in a [Remote Read format](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto#L31). ![Remote read and write architecture](images/remote_integrations.png) -The read and write protocols both use a snappy-compressed protocol buffer encoding over -HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC -over HTTP/2 in the future, when all hops between Prometheus and the remote storage can -safely be assumed to support HTTP/2. +The remote read and write protocols both use a snappy-compressed protocol buffer encoding over +HTTP. The read protocol is not yet considered as stable API. -For details on configuring remote storage integrations in Prometheus, see the +The write protocol has a [stable specification for 1.0 version](https://prometheus.io/docs/specs/remote_write_spec/) +and [experimental specification for 2.0 version](https://prometheus.io/docs/specs/remote_write_spec_2_0/), +both supported by Prometheus server. + +For details on configuring remote storage integrations in Prometheus as a client, see the [remote write](configuration/configuration.md#remote_write) and [remote read](configuration/configuration.md#remote_read) sections of the Prometheus configuration documentation. -The built-in remote write receiver can be enabled by setting the -`--web.enable-remote-write-receiver` command line flag. When enabled, -the remote write receiver endpoint is `/api/v1/write`. 
- -For details on the request and response messages, see the -[remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto). - Note that on the read path, Prometheus only fetches raw series data for a set of label selectors and time ranges from the remote end. All PromQL evaluation on the raw data still happens in Prometheus itself. This means that remote read queries @@ -189,6 +185,11 @@ have some scalability limit, since all necessary data needs to be loaded into th querying Prometheus server first and then processed there. However, supporting fully distributed evaluation of PromQL was deemed infeasible for the time being. +Prometheus also serves both protocols. The built-in remote write receiver can be enabled +by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, +the remote write receiver endpoint is `/api/v1/write`. The remote read endpoint is +available on [`/api/v1/read`](https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/). + ### Existing integrations To learn more about existing integrations with remote storage systems, see the From 3dc4a9cfa70aa18933d6e714355f8a82e7421da6 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 6 Sep 2024 15:42:08 +0200 Subject: [PATCH 0467/1397] Bump klog-gokit Signed-off-by: Julien --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cd5940dbfa..d8efbcb744 100644 --- a/go.mod +++ b/go.mod @@ -207,7 +207,7 @@ require ( replace ( k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0 - k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0 + k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0 ) // Exclude linodego v1.0.0 as it is no longer published on github. 
diff --git a/go.sum b/go.sum index 97a6a9f057..24e8183976 100644 --- a/go.sum +++ b/go.sum @@ -663,8 +663,8 @@ github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= -github.com/simonpasquier/klog-gokit/v3 v3.3.0 h1:HMzH999kO5gEgJTaWWO+xjncW5oycspcsBnjn9b853Q= -github.com/simonpasquier/klog-gokit/v3 v3.3.0/go.mod h1:uSbnWC3T7kt1dQyY9sjv0Ao1SehMAJdVnUNSKhjaDsg= +github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM= +github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= From df750ef17018dd3f197f9726920ba67328055335 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 15:58:34 +0000 Subject: [PATCH 0468/1397] build(deps): bump github.com/go-zookeeper/zk from 1.0.3 to 1.0.4 Bumps [github.com/go-zookeeper/zk](https://github.com/go-zookeeper/zk) from 1.0.3 to 1.0.4. - [Release notes](https://github.com/go-zookeeper/zk/releases) - [Commits](https://github.com/go-zookeeper/zk/compare/v1.0.3...v1.0.4) --- updated-dependencies: - dependency-name: github.com/go-zookeeper/zk dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 845e3277b8..667a6fa56e 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 github.com/go-openapi/strfmt v0.23.0 - github.com/go-zookeeper/zk v1.0.3 + github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.6.0 diff --git a/go.sum b/go.sum index edb5b650bd..e03a64918c 100644 --- a/go.sum +++ b/go.sum @@ -238,8 +238,8 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= +github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= From 07856b4bb9abc421b91937ee8211791c8ae2cf29 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 20:35:55 +0200 Subject: [PATCH 0469/1397] Make linter run by upgrading the ESLint config Signed-off-by: Julius Volz --- web/ui/mantine-ui/.eslintrc.cjs | 21 ---- web/ui/mantine-ui/eslint.config.mjs | 71 +++++++++++ web/ui/mantine-ui/package.json | 7 +- web/ui/mantine-ui/src/App.tsx | 2 +- 
.../src/pages/query/HistogramHelpers.ts | 2 +- .../mantine-ui/src/pages/query/TreeNode.tsx | 6 +- web/ui/mantine-ui/src/promql/ast.ts | 114 +++++++++--------- .../{cmd => tools}/gen_functions_docs/main.go | 0 .../{cmd => tools}/gen_functions_list/main.go | 0 web/ui/mantine-ui/src/promql/tools/go.mod | 26 ++++ web/ui/mantine-ui/src/promql/tools/go.sum | 107 ++++++++++++++++ web/ui/mantine-ui/src/promql/utils.ts | 113 ++++++++++------- web/ui/package-lock.json | 27 +++++ 13 files changed, 371 insertions(+), 125 deletions(-) delete mode 100644 web/ui/mantine-ui/.eslintrc.cjs create mode 100644 web/ui/mantine-ui/eslint.config.mjs rename web/ui/mantine-ui/src/promql/{cmd => tools}/gen_functions_docs/main.go (100%) rename web/ui/mantine-ui/src/promql/{cmd => tools}/gen_functions_list/main.go (100%) create mode 100644 web/ui/mantine-ui/src/promql/tools/go.mod create mode 100644 web/ui/mantine-ui/src/promql/tools/go.sum diff --git a/web/ui/mantine-ui/.eslintrc.cjs b/web/ui/mantine-ui/.eslintrc.cjs deleted file mode 100644 index cf68b3c90e..0000000000 --- a/web/ui/mantine-ui/.eslintrc.cjs +++ /dev/null @@ -1,21 +0,0 @@ -module.exports = { - root: true, - env: { browser: true, es2020: true }, - extends: [ - 'eslint:recommended', - 'plugin:@typescript-eslint/recommended', - 'plugin:react-hooks/recommended', - ], - ignorePatterns: ['dist', '.eslintrc.cjs'], - parser: '@typescript-eslint/parser', - plugins: ['react-refresh'], - rules: { - 'react-refresh/only-export-components': [ - 'warn', - { allowConstantExport: true }, - ], - 'no-unused-vars': ['error', { 'argsIgnorePattern': '^_' }], - '@typescript-eslint/no-unused-vars': ['error', { 'argsIgnorePattern': '^_' }], - 'prefer-const': ['error', { destructuring: 'all' }], - }, -} diff --git a/web/ui/mantine-ui/eslint.config.mjs b/web/ui/mantine-ui/eslint.config.mjs new file mode 100644 index 0000000000..c3cc58920e --- /dev/null +++ b/web/ui/mantine-ui/eslint.config.mjs @@ -0,0 +1,71 @@ +import { fixupConfigRules } from 
'@eslint/compat'; +import reactRefresh from 'eslint-plugin-react-refresh'; +import globals from 'globals'; +import tsParser from '@typescript-eslint/parser'; +import path from 'node:path'; +import { fileURLToPath } from 'node:url'; +import js from '@eslint/js'; +import { FlatCompat } from '@eslint/eslintrc'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const compat = new FlatCompat({ + baseDirectory: __dirname, + recommendedConfig: js.configs.recommended, + allConfig: js.configs.all +}); + +export default [{ + ignores: ['**/dist', '**/.eslintrc.cjs'], +}, ...fixupConfigRules(compat.extends( + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended', + 'plugin:react-hooks/recommended', +)), { + plugins: { + 'react-refresh': reactRefresh, + }, + + languageOptions: { + globals: { + ...globals.browser, + }, + + parser: tsParser, + }, + + rules: { + 'react-refresh/only-export-components': ['warn', { + allowConstantExport: true, + }], + + // Disable the base rule as it can report incorrect errors + 'no-unused-vars': 'off', + + // Use the TypeScript-specific rule for unused vars + '@typescript-eslint/no-unused-vars': ['warn', { + argsIgnorePattern: '^_', + varsIgnorePattern: '^_', + caughtErrorsIgnorePattern: '^_', + }], + + 'prefer-const': ['error', { + destructuring: 'all', + }], + }, + }, + // Override for Node.js-based config files + { + files: ['postcss.config.cjs'], // Specify any other config files + languageOptions: { + ecmaVersion: 2021, // Optional, set ECMAScript version + sourceType: 'script', // For CommonJS (non-ESM) modules + globals: { + module: 'readonly', + require: 'readonly', + process: 'readonly', + __dirname: 'readonly', // Include other Node.js globals if needed + }, + }, + }, +]; diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index bb7e5534ac..7dfdf80b4c 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -6,7 +6,8 @@ 
"scripts": { "start": "vite", "build": "tsc && vite build", - "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "lint": "eslint . --report-unused-disable-directives --max-warnings 0", + "lint:fix": "eslint . --report-unused-disable-directives --max-warnings 0 --fix", "preview": "vite preview", "test": "vitest" }, @@ -49,6 +50,9 @@ "use-query-params": "^2.2.1" }, "devDependencies": { + "@eslint/compat": "^1.1.1", + "@eslint/eslintrc": "^3.1.0", + "@eslint/js": "^9.9.1", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -57,6 +61,7 @@ "eslint": "^9.9.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.11", + "globals": "^15.9.0", "jsdom": "^25.0.0", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 57701914fd..1b15f01800 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -196,7 +196,7 @@ function App() { useEffect(() => { dispatch(updateSettings({ pathPrefix })); - }, [pathPrefix]); + }, [pathPrefix, dispatch]); const { agentMode, consolesLink } = useSettings(); diff --git a/web/ui/mantine-ui/src/pages/query/HistogramHelpers.ts b/web/ui/mantine-ui/src/pages/query/HistogramHelpers.ts index 0bb76ba429..12dd3e1bd2 100644 --- a/web/ui/mantine-ui/src/pages/query/HistogramHelpers.ts +++ b/web/ui/mantine-ui/src/pages/query/HistogramHelpers.ts @@ -137,7 +137,7 @@ export const bucketRangeString = ([ boundaryRule, leftBoundary, rightBoundary, - // eslint-disable-next-line @typescript-eslint/no-unused-vars + _, ]: [number, string, string, string]): string => { return `${leftDelim(boundaryRule)}${leftBoundary} -> ${rightBoundary}${rightDelim(boundaryRule)}`; diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index f48bda3278..b6a78e9a69 100644 --- 
a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -131,13 +131,13 @@ const TreeNode: FC<{ if (error) { reportNodeState && reportNodeState("error"); } - }, [error]); + }, [error, reportNodeState]); useEffect(() => { if (isFetching) { reportNodeState && reportNodeState("running"); } - }, [isFetching]); + }, [isFetching, reportNodeState]); // Update the size and position of tree connector lines based on the node's and its parent's position. useLayoutEffect(() => { @@ -221,7 +221,7 @@ const TreeNode: FC<{ ), labelExamples, }); - }, [data]); + }, [data, reportNodeState]); const innerNode = ( { let cur: ASTNode; - for (cur = n; cur.type === 'parenExpr'; cur = cur.expr) {} + for (cur = n; cur.type === "parenExpr"; cur = cur.expr) { + // Continue traversing until a non-parenthesis expression is found + } return cur.type; }; @@ -34,7 +43,10 @@ const binOpPrecedence = { [binaryOperatorType.atan2]: 2, }; -export const maybeParenthesizeBinopChild = (op: binaryOperatorType, child: ASTNode): ASTNode => { +export const maybeParenthesizeBinopChild = ( + op: binaryOperatorType, + child: ASTNode +): ASTNode => { if (child.type !== nodeType.binaryExpr) { return child; } @@ -73,7 +85,7 @@ export const getNodeChildren = (node: ASTNode): ASTNode[] => { case nodeType.binaryExpr: return [node.lhs, node.rhs]; default: - throw new Error('unsupported node type'); + throw new Error("unsupported node type"); } }; @@ -92,18 +104,19 @@ export const getNodeChild = (node: ASTNode, idx: number) => { case nodeType.binaryExpr: return idx === 0 ? 
node.lhs : node.rhs; default: - throw new Error('unsupported node type'); + throw new Error("unsupported node type"); } }; export const containsPlaceholders = (node: ASTNode): boolean => - node.type === nodeType.placeholder || getNodeChildren(node).some((n) => containsPlaceholders(n)); + node.type === nodeType.placeholder || + getNodeChildren(node).some((n) => containsPlaceholders(n)); export const nodeValueType = (node: ASTNode): valueType | null => { switch (node.type) { case nodeType.aggregation: return valueType.vector; - case nodeType.binaryExpr: + case nodeType.binaryExpr: { const childTypes = [nodeValueType(node.lhs), nodeValueType(node.rhs)]; if (childTypes.includes(null)) { @@ -116,6 +129,7 @@ export const nodeValueType = (node: ASTNode): valueType | null => { } return valueType.scalar; + } case nodeType.call: return node.func.returnType; case nodeType.matrixSelector: @@ -135,7 +149,7 @@ export const nodeValueType = (node: ASTNode): valueType | null => { case nodeType.vectorSelector: return valueType.vector; default: - throw new Error('invalid node type'); + throw new Error("invalid node type"); } }; @@ -144,50 +158,65 @@ export const childDescription = (node: ASTNode, idx: number): string => { case nodeType.aggregation: if (aggregatorsWithParam.includes(node.op) && idx === 0) { switch (node.op) { - case 'topk': - case 'bottomk': - case 'limitk': - return 'k'; - case 'quantile': - return 'quantile'; - case 'count_values': - return 'target label name'; - case 'limit_ratio': - return 'ratio'; + case "topk": + case "bottomk": + case "limitk": + return "k"; + case "quantile": + return "quantile"; + case "count_values": + return "target label name"; + case "limit_ratio": + return "ratio"; } } - return 'vector to aggregate'; + return "vector to aggregate"; case nodeType.binaryExpr: - return idx === 0 ? 'left-hand side' : 'right-hand side'; + return idx === 0 ? 
"left-hand side" : "right-hand side"; case nodeType.call: - if (functionArgNames.hasOwnProperty(node.func.name)) { + if (node.func.name in functionArgNames) { const argNames = functionArgNames[node.func.name]; - return argNames[Math.min(functionArgNames[node.func.name].length - 1, idx)]; + return argNames[Math.min(argNames.length - 1, idx)]; } - return 'argument'; + return "argument"; case nodeType.parenExpr: - return 'expression'; + return "expression"; case nodeType.placeholder: - return 'argument'; + return "argument"; case nodeType.subquery: - return 'subquery to execute'; + return "subquery to execute"; case nodeType.unaryExpr: - return 'expression'; + return "expression"; default: - throw new Error('invalid node type'); + throw new Error("invalid node type"); } }; -export const aggregatorsWithParam = ['topk', 'bottomk', 'quantile', 'count_values', 'limitk', 'limit_ratio']; +export const aggregatorsWithParam = [ + "topk", + "bottomk", + "quantile", + "count_values", + "limitk", + "limit_ratio", +]; -export const anyValueType = [valueType.scalar, valueType.string, valueType.matrix, valueType.vector]; +export const anyValueType = [ + valueType.scalar, + valueType.string, + valueType.matrix, + valueType.vector, +]; -export const allowedChildValueTypes = (node: ASTNode, idx: number): valueType[] => { +export const allowedChildValueTypes = ( + node: ASTNode, + idx: number +): valueType[] => { switch (node.type) { case nodeType.aggregation: if (aggregatorsWithParam.includes(node.op) && idx === 0) { - if (node.op === 'count_values') { + if (node.op === "count_values") { return [valueType.string]; } return [valueType.scalar]; @@ -211,7 +240,7 @@ export const allowedChildValueTypes = (node: ASTNode, idx: number): valueType[] case nodeType.unaryExpr: return anyValueType; default: - throw new Error('invalid node type'); + throw new Error("invalid node type"); } }; @@ -225,17 +254,19 @@ export const canAddVarArg = (node: Call): boolean => { }; export const canRemoveVarArg 
= (node: Call): boolean => { - return node.func.variadic !== 0 && node.args.length >= node.func.argTypes.length; + return ( + node.func.variadic !== 0 && node.args.length >= node.func.argTypes.length + ); }; export const humanizedValueType: Record = { - [valueType.none]: 'none', - [valueType.string]: 'string', - [valueType.scalar]: 'number (scalar)', - [valueType.vector]: 'instant vector', - [valueType.matrix]: 'range vector', + [valueType.none]: "none", + [valueType.string]: "string", + [valueType.scalar]: "number (scalar)", + [valueType.vector]: "instant vector", + [valueType.matrix]: "range vector", }; export const escapeString = (str: string) => { - return str.replace(/([\\"])/g, '\\$1'); + return str.replace(/([\\"])/g, "\\$1"); }; diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 00736ad63d..56e34e2ded 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -62,6 +62,9 @@ "use-query-params": "^2.2.1" }, "devDependencies": { + "@eslint/compat": "^1.1.1", + "@eslint/eslintrc": "^3.1.0", + "@eslint/js": "^9.9.1", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -70,6 +73,7 @@ "eslint": "^9.9.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.11", + "globals": "^15.9.0", "jsdom": "^25.0.0", "postcss": "^8.4.35", "postcss-preset-mantine": "^1.17.0", @@ -169,6 +173,19 @@ } } }, + "mantine-ui/node_modules/globals": { + "version": "15.9.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.9.0.tgz", + "integrity": "sha512-SmSKyLLKFbSr6rptvP8izbyxJL4ILwqO9Jg23UA0sDlGlu58V59D1//I3vlc0KJphVdUR7vMjHIplYnzBxorQA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.54.1", @@ -1448,6 +1465,16 @@ "node": "^12.0.0 || ^14.0.0 || 
>=16.0.0" } }, + "node_modules/@eslint/compat": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@eslint/compat/-/compat-1.1.1.tgz", + "integrity": "sha512-lpHyRyplhGPL5mGEh6M9O5nnKk0Gz4bFI+Zu6tKlPpDUN7XshWvH9C/px4UVm87IAANE0W81CEsNGbS1KlzXpA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@eslint/config-array": { "version": "0.18.0", "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.18.0.tgz", From b75a12b52f2ebbf228a80a77bc6859cc230cbb0c Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 20:36:39 +0200 Subject: [PATCH 0470/1397] Remove unneeded in TSDBStatusPage Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/TSDBStatusPage.tsx | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/TSDBStatusPage.tsx b/web/ui/mantine-ui/src/pages/TSDBStatusPage.tsx index 44e67a38be..721ac69040 100644 --- a/web/ui/mantine-ui/src/pages/TSDBStatusPage.tsx +++ b/web/ui/mantine-ui/src/pages/TSDBStatusPage.tsx @@ -1,4 +1,4 @@ -import { Stack, Card, Group, Table, Text } from "@mantine/core"; +import { Stack, Card, Table, Text } from "@mantine/core"; import { useSuspenseAPIQuery } from "../api/api"; import { TSDBStatusResult } from "../api/responseTypes/tsdbStatus"; import { formatTimestamp } from "../lib/formatTime"; @@ -71,11 +71,9 @@ export default function TSDBStatusPage() { }, ].map(({ title, unit = "Count", stats, formatAsCode }) => ( - - - {title} - - + + {title} + From 5fd860f8067653a85b06ca2be7fed1b2e730fe58 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 20:38:10 +0200 Subject: [PATCH 0471/1397] Complete building tree view and implement "Explain" tab Signed-off-by: Julius Volz --- .../pages/query/ExplainViews/Aggregation.tsx | 109 +++ .../ExplainViews/BinaryExpr/BinaryExpr.tsx | 106 +++ .../ExplainViews/BinaryExpr/ScalarScalar.tsx | 54 ++ 
.../ExplainViews/BinaryExpr/VectorScalar.tsx | 104 +++ .../ExplainViews/BinaryExpr/VectorVector.tsx | 686 ++++++++++++++++++ .../query/ExplainViews/ExplainView.module.css | 8 + .../pages/query/ExplainViews/ExplainView.tsx | 198 +++++ .../src/pages/query/ExplainViews/Selector.tsx | 230 ++++++ .../mantine-ui/src/pages/query/QueryPanel.tsx | 32 + .../mantine-ui/src/pages/query/SeriesName.tsx | 4 +- .../mantine-ui/src/pages/query/TreeNode.tsx | 11 +- .../mantine-ui/src/pages/query/TreeView.tsx | 6 +- .../src/pages/query/urlStateEncoding.ts | 19 +- 13 files changed, 1558 insertions(+), 9 deletions(-) create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/Aggregation.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/BinaryExpr.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/ScalarScalar.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorScalar.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.module.css create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx create mode 100644 web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/Aggregation.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/Aggregation.tsx new file mode 100644 index 0000000000..3db2d4c0f5 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/Aggregation.tsx @@ -0,0 +1,109 @@ +import { FC } from "react"; +import ASTNode, { Aggregation, aggregationType } from "../../../promql/ast"; +import { labelNameList } from "../../../promql/format"; +import { parsePrometheusFloat } from "../../../lib/formatFloatValue"; +import { Card, Text } from "@mantine/core"; + +const describeAggregationType = ( + aggrType: aggregationType, + param: ASTNode | null +) => { + switch (aggrType) 
{ + case "sum": + return "sums over the sample values of the input series"; + case "min": + return "takes the minimum of the sample values of the input series"; + case "max": + return "takes the maximum of the sample values of the input series"; + case "avg": + return "calculates the average of the sample values of the input series"; + case "stddev": + return "calculates the population standard deviation of the sample values of the input series"; + case "stdvar": + return "calculates the population standard variation of the sample values of the input series"; + case "count": + return "counts the number of input series"; + case "group": + return "groups the input series by the supplied grouping labels, while setting the sample value to 1"; + case "count_values": + if (param === null) { + throw new Error( + "encountered count_values() node without label parameter" + ); + } + if (param.type !== "stringLiteral") { + throw new Error( + "encountered count_values() node without string literal label parameter" + ); + } + return ( + <> + outputs one time series for each unique sample value in the input + series (each counting the number of occurrences of that value and + indicating the original value in the {labelNameList([param.val])}{" "} + label) + + ); + case "bottomk": + return "returns the bottom K series by value"; + case "topk": + return "returns the top K series by value"; + case "quantile": + if (param === null) { + throw new Error( + "encountered quantile() node without quantile parameter" + ); + } + if (param.type === "numberLiteral") { + return `calculates the ${param.val}th quantile (${ + parsePrometheusFloat(param.val) * 100 + }th percentile) over the sample values of the input series`; + } + return "calculates a quantile over the sample values of the input series"; + + case "limitk": + return "limits the output to K series"; + case "limit_ratio": + return "limits the output to a ratio of the input series"; + default: + throw new Error(`invalid aggregation 
type ${aggrType}`); + } +}; + +const describeAggregationGrouping = (grouping: string[], without: boolean) => { + if (without) { + return ( + <>aggregating away the [{labelNameList(grouping)}] label dimensions + ); + } + + if (grouping.length === 1) { + return <>grouped by their {labelNameList(grouping)} label dimension; + } + + if (grouping.length > 1) { + return <>grouped by their [{labelNameList(grouping)}] label dimensions; + } + + return "aggregating away any label dimensions"; +}; + +interface AggregationExplainViewProps { + node: Aggregation; +} + +const AggregationExplainView: FC = ({ node }) => { + return ( + + + Aggregation + + + This node {describeAggregationType(node.op, node.param)},{" "} + {describeAggregationGrouping(node.grouping, node.without)}. + + + ); +}; + +export default AggregationExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/BinaryExpr.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/BinaryExpr.tsx new file mode 100644 index 0000000000..837b84da31 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/BinaryExpr.tsx @@ -0,0 +1,106 @@ +import { FC } from "react"; +import { BinaryExpr } from "../../../../promql/ast"; +import serializeNode from "../../../../promql/serialize"; +import VectorScalarBinaryExprExplainView from "./VectorScalar"; +import VectorVectorBinaryExprExplainView from "./VectorVector"; +import ScalarScalarBinaryExprExplainView from "./ScalarScalar"; +import { nodeValueType } from "../../../../promql/utils"; +import { useSuspenseAPIQuery } from "../../../../api/api"; +import { InstantQueryResult } from "../../../../api/responseTypes/query"; +import { Card, Text } from "@mantine/core"; + +interface BinaryExprExplainViewProps { + node: BinaryExpr; +} + +const BinaryExprExplainView: FC = ({ node }) => { + const { data: lhs } = useSuspenseAPIQuery({ + path: `/query`, + params: { + query: serializeNode(node.lhs), + }, + }); + const { data: rhs } = 
useSuspenseAPIQuery({ + path: `/query`, + params: { + query: serializeNode(node.rhs), + }, + }); + + if ( + lhs.data.resultType !== nodeValueType(node.lhs) || + rhs.data.resultType !== nodeValueType(node.rhs) + ) { + // This can happen for a brief transitionary render when "node" has changed, but "lhs" and "rhs" + // haven't switched back to loading yet (leading to a crash in e.g. the vector-vector explain view). + return null; + } + + // Scalar-scalar binops. + if (lhs.data.resultType === "scalar" && rhs.data.resultType === "scalar") { + return ( + + + Scalar-to-scalar binary operation + + + + ); + } + + // Vector-scalar binops. + if (lhs.data.resultType === "scalar" && rhs.data.resultType === "vector") { + return ( + + + Scalar-to-vector binary operation + + + + ); + } + if (lhs.data.resultType === "vector" && rhs.data.resultType === "scalar") { + return ( + + + Vector-to-scalar binary operation + + + + ); + } + + // Vector-vector binops. + if (lhs.data.resultType === "vector" && rhs.data.resultType === "vector") { + return ( + + + Vector-to-vector binary operation + + + + ); + } + + throw new Error("invalid binary operator argument types"); +}; + +export default BinaryExprExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/ScalarScalar.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/ScalarScalar.tsx new file mode 100644 index 0000000000..874d824486 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/ScalarScalar.tsx @@ -0,0 +1,54 @@ +import { FC } from "react"; +import { BinaryExpr } from "../../../../promql/ast"; +import { scalarBinOp } from "../../../../promql/binOp"; +import { Table } from "@mantine/core"; +import { SampleValue } from "../../../../api/responseTypes/query"; +import { + formatPrometheusFloat, + parsePrometheusFloat, +} from "../../../../lib/formatFloatValue"; + +interface ScalarScalarBinaryExprExplainViewProps { + node: BinaryExpr; + lhs: SampleValue; + rhs: SampleValue; 
+} + +const ScalarScalarBinaryExprExplainView: FC< + ScalarScalarBinaryExprExplainViewProps +> = ({ node, lhs, rhs }) => { + const [lhsVal, rhsVal] = [ + parsePrometheusFloat(lhs[1]), + parsePrometheusFloat(rhs[1]), + ]; + + return ( +
    + + + Left value + Operator + Right value + + Result + + + + + {lhs[1]} + + {node.op} + {node.bool && " bool"} + + {rhs[1]} + = + + {formatPrometheusFloat(scalarBinOp(node.op, lhsVal, rhsVal))} + + + +
    + ); +}; + +export default ScalarScalarBinaryExprExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorScalar.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorScalar.tsx new file mode 100644 index 0000000000..3a0652f818 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorScalar.tsx @@ -0,0 +1,104 @@ +import { FC } from "react"; +import { BinaryExpr } from "../../../../promql/ast"; +// import SeriesName from '../../../../utils/SeriesName'; +import { isComparisonOperator } from "../../../../promql/utils"; +import { vectorElemBinop } from "../../../../promql/binOp"; +import { + InstantSample, + SampleValue, +} from "../../../../api/responseTypes/query"; +import { Alert, Table, Text } from "@mantine/core"; +import { + formatPrometheusFloat, + parsePrometheusFloat, +} from "../../../../lib/formatFloatValue"; +import SeriesName from "../../SeriesName"; + +interface VectorScalarBinaryExprExplainViewProps { + node: BinaryExpr; + scalar: SampleValue; + vector: InstantSample[]; + scalarLeft: boolean; +} + +const VectorScalarBinaryExprExplainView: FC< + VectorScalarBinaryExprExplainViewProps +> = ({ node, scalar, vector, scalarLeft }) => { + if (vector.length === 0) { + return ( + + One side of the binary operation produces 0 results, no matching + information shown. + + ); + } + + return ( + + + + {!scalarLeft && Left labels} + Left value + Operator + {scalarLeft && Right labels} + Right value + + Result + + + + {vector.map((sample: InstantSample, idx) => { + if (!sample.value) { + // TODO: Handle native histograms or show a better error message. + throw new Error("Native histograms are not supported yet"); + } + + const vecVal = parsePrometheusFloat(sample.value[1]); + const scalVal = parsePrometheusFloat(scalar[1]); + + let { value, keep } = scalarLeft + ? 
vectorElemBinop(node.op, scalVal, vecVal) + : vectorElemBinop(node.op, vecVal, scalVal); + if (isComparisonOperator(node.op) && scalarLeft) { + value = vecVal; + } + if (node.bool) { + value = Number(keep); + keep = true; + } + + const scalarCell = {scalar[1]}; + const vectorCells = ( + <> + + + + {sample.value[1]} + + ); + + return ( + + {scalarLeft ? scalarCell : vectorCells} + + {node.op} + {node.bool && " bool"} + + {scalarLeft ? vectorCells : scalarCell} + = + + {keep ? ( + formatPrometheusFloat(value) + ) : ( + dropped + )} + + + ); + })} + +
    + ); +}; + +export default VectorScalarBinaryExprExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx new file mode 100644 index 0000000000..0b0854409e --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx @@ -0,0 +1,686 @@ +import React, { FC, useState } from "react"; +import { BinaryExpr, vectorMatchCardinality } from "../../../../promql/ast"; +import { InstantSample, Metric } from "../../../../api/responseTypes/query"; +import { isComparisonOperator, isSetOperator } from "../../../../promql/utils"; +import { + VectorMatchError, + BinOpMatchGroup, + MatchErrorType, + computeVectorVectorBinOp, + filteredSampleValue, +} from "../../../../promql/binOp"; +import { formatNode, labelNameList } from "../../../../promql/format"; +import { + Alert, + Anchor, + Box, + Button, + Group, + List, + Switch, + Table, + Text, +} from "@mantine/core"; +import { useLocalStorage } from "@mantine/hooks"; +import { IconAlertTriangle } from "@tabler/icons-react"; +import SeriesName from "../../SeriesName"; + +// We use this color pool for two purposes: +// +// 1. To distinguish different match groups from each other. +// 2. To distinguish multiple series within one match group from each other. 
+const colorPool = [ + "#1f77b4", + "#ff7f0e", + "#2ca02c", + "#d62728", + "#9467bd", + "#8c564b", + "#e377c2", + "#7f7f7f", + "#bcbd22", + "#17becf", + "#393b79", + "#637939", + "#8c6d31", + "#843c39", + "#d6616b", + "#7b4173", + "#ce6dbd", + "#9c9ede", + "#c5b0d5", + "#c49c94", + "#f7b6d2", + "#c7c7c7", + "#dbdb8d", + "#9edae5", + "#393b79", + "#637939", + "#8c6d31", + "#843c39", + "#d6616b", + "#7b4173", + "#ce6dbd", + "#9c9ede", + "#c5b0d5", + "#c49c94", + "#f7b6d2", + "#c7c7c7", + "#dbdb8d", + "#9edae5", + "#17becf", + "#393b79", + "#637939", + "#8c6d31", + "#843c39", + "#d6616b", + "#7b4173", + "#ce6dbd", + "#9c9ede", + "#c5b0d5", + "#c49c94", + "#f7b6d2", +]; + +const rhsColorOffset = colorPool.length / 2 + 3; +const colorForIndex = (idx: number, offset?: number) => + `${colorPool[(idx + (offset || 0)) % colorPool.length]}80`; + +const seriesSwatch = (color: string) => ( + +); +interface VectorVectorBinaryExprExplainViewProps { + node: BinaryExpr; + lhs: InstantSample[]; + rhs: InstantSample[]; +} + +const noMatchLabels = ( + metric: Metric, + on: boolean, + labels: string[] +): Metric => { + const result: Metric = {}; + for (const name in metric) { + if (!(labels.includes(name) === on && (on || name !== "__name__"))) { + result[name] = metric[name]; + } + } + return result; +}; + +const explanationText = (node: BinaryExpr): React.ReactNode => { + const matching = node.matching!; + const [oneSide, manySide] = + matching.card === vectorMatchCardinality.oneToMany + ? ["left", "right"] + : ["right", "left"]; + + return ( + <> + + {isComparisonOperator(node.op) ? ( + <> + This node filters the series from the left-hand side based on the + result of a " + {node.op}" + comparison with matching series from the right-hand side. + + ) : ( + <> + This node calculates the result of applying the " + {node.op}" + operator between the sample values of matching series from two sets + of time series. 
+ + )} + + + {(matching.labels.length > 0 || matching.on) && + (matching.on ? ( + + on( + {labelNameList(matching.labels)}):{" "} + {matching.labels.length > 0 ? ( + <> + series on both sides are matched on the labels{" "} + {labelNameList(matching.labels)} + + ) : ( + <> + all series from one side are matched to all series on the + other side. + + )} + + ) : ( + + ignoring( + {labelNameList(matching.labels)}): series on both sides are + matched on all of their labels, except{" "} + {labelNameList(matching.labels)}. + + ))} + {matching.card === vectorMatchCardinality.oneToOne ? ( + + One-to-one match. Each series from the left-hand side is allowed to + match with at most one series on the right-hand side, and vice + versa. + + ) : ( + + + group_{manySide}({labelNameList(matching.include)}) + + : {matching.card} match. Each series from the {oneSide}-hand side is + allowed to match with multiple series from the {manySide}-hand side. + {matching.include.length !== 0 && ( + <> + {" "} + Any {labelNameList(matching.include)} labels found on the{" "} + {oneSide}-hand side are propagated into the result, in addition + to the match group's labels. + + )} + + )} + {node.bool && ( + + bool: Instead of + filtering series based on the outcome of the comparison for matched + series, keep all series, but return the comparison outcome as a + boolean 0 or{" "} + 1 sample value. + + )} + + + ); +}; + +const explainError = ( + binOp: BinaryExpr, + _mg: BinOpMatchGroup, + err: VectorMatchError +) => { + const fixes = ( + <> + + Possible fixes: + + + {err.type === MatchErrorType.multipleMatchesForOneToOneMatching && ( + + + + Allow {err.dupeSide === "left" ? "many-to-one" : "one-to-many"}{" "} + matching + + : If you want to allow{" "} + {err.dupeSide === "left" ? 
"many-to-one" : "one-to-many"}{" "} + matching, you need to explicitly request it by adding a{" "} + + group_{err.dupeSide}() + {" "} + modifier to the operator: + + + {formatNode( + { + ...binOp, + matching: { + ...(binOp.matching + ? binOp.matching + : { labels: [], on: false, include: [] }), + card: + err.dupeSide === "left" + ? vectorMatchCardinality.manyToOne + : vectorMatchCardinality.oneToMany, + }, + }, + true, + 1 + )} + + + )} + + Update your matching parameters: Consider including + more differentiating labels in your matching modifiers (via{" "} + on() /{" "} + ignoring()) to + split multiple series into distinct match groups. + + + Aggregate the input: Consider aggregating away the + extra labels that create multiple series per group before applying the + binary operation. + + + + ); + + switch (err.type) { + case MatchErrorType.multipleMatchesForOneToOneMatching: + return ( + <> + + Binary operators only allow one-to-one matching by + default, but we found{" "} + multiple series on the {err.dupeSide} side for this + match group. + + {fixes} + + ); + case MatchErrorType.multipleMatchesOnBothSides: + return ( + <> + + We found multiple series on both sides for this + match group. Since many-to-many matching is not + supported, you need to ensure that at least one of the sides only + yields a single series. + + {fixes} + + ); + case MatchErrorType.multipleMatchesOnOneSide: { + const [oneSide, manySide] = + binOp.matching!.card === vectorMatchCardinality.oneToMany + ? ["left", "right"] + : ["right", "left"]; + return ( + <> + + You requested{" "} + + {oneSide === "right" ? "many-to-one" : "one-to-many"} matching + {" "} + via{" "} + + group_{manySide}() + + , but we also found{" "} + multiple series on the {oneSide} side of the match + group. Make sure that the {oneSide} side only contains a single + series. 
+ + {fixes} + + ); + } + default: + throw new Error("unknown match error"); + } +}; + +const VectorVectorBinaryExprExplainView: FC< + VectorVectorBinaryExprExplainViewProps +> = ({ node, lhs, rhs }) => { + // TODO: Don't use Mantine's local storage as a one-off here. + // const [allowLineBreaks, setAllowLineBreaks] = useLocalStorage({ + // key: "queryPage.explain.binaryOperators.breakLongLines", + // defaultValue: true, + // }); + + const [showSampleValues, setShowSampleValues] = useLocalStorage({ + key: "queryPage.explain.binaryOperators.showSampleValues", + defaultValue: false, + }); + + const [maxGroups, setMaxGroups] = useState(100); + const [maxSeriesPerGroup, setMaxSeriesPerGroup] = useState< + number | undefined + >(100); + + const { matching } = node; + if (matching === null) { + // The parent should make sure to only pass in vector-vector binops that have their "matching" field filled out. + throw new Error("missing matching parameters in vector-to-vector binop"); + } + + const { groups: matchGroups, numGroups } = computeVectorVectorBinOp( + node.op, + matching, + node.bool, + lhs, + rhs, + { + maxGroups: maxGroups, + maxSeriesPerGroup: maxSeriesPerGroup, + } + ); + const errCount = Object.values(matchGroups).filter((mg) => mg.error).length; + + return ( + <> + {explanationText(node)} + + {!isSetOperator(node.op) && ( + <> + + {/* + setAllowLineBreaks(event.currentTarget.checked) + } + /> */} + + setShowSampleValues(event.currentTarget.checked) + } + /> + + + {numGroups > Object.keys(matchGroups).length && ( + } + > + Too many match groups to display, only showing{" "} + {Object.keys(matchGroups).length} out of {numGroups} groups. + setMaxGroups(undefined)}> + Show all groups + + + )} + + {errCount > 0 && ( + } + > + Found matching issues in {errCount} match group + {errCount > 1 ? "s" : ""}. See below for per-group error details. 
+ + )} + + + + {Object.values(matchGroups).map((mg, mgIdx) => { + const { + groupLabels, + lhs, + lhsCount, + rhs, + rhsCount, + result, + error, + } = mg; + + const noLHSMatches = lhs.length === 0; + const noRHSMatches = rhs.length === 0; + + const groupColor = colorPool[mgIdx % colorPool.length]; + const noMatchesColor = "#e0e0e0"; + const lhsGroupColor = noLHSMatches + ? noMatchesColor + : groupColor; + const rhsGroupColor = noRHSMatches + ? noMatchesColor + : groupColor; + const resultGroupColor = + noLHSMatches || noRHSMatches ? noMatchesColor : groupColor; + + const matchGroupTitleRow = (color: string) => ( + + + + + + ); + + const matchGroupTable = ( + series: InstantSample[], + seriesCount: number, + color: string, + colorOffset?: number + ) => ( + +
    + + {series.length === 0 ? ( + + + no matching series + + + ) : ( + <> + {matchGroupTitleRow(color)} + {series.map((s, sIdx) => { + if (s.value === undefined) { + // TODO: Figure out how to handle native histograms. + throw new Error( + "Native histograms are not supported yet" + ); + } + + return ( + + + + {seriesSwatch( + colorForIndex(sIdx, colorOffset) + )} + + + + + {showSampleValues && ( + {s.value[1]} + )} + + ); + })} + + )} + {seriesCount > series.length && ( + + + {seriesCount - series.length} more series omitted + – + + + + )} + +
    +
    + ); + + const lhsTable = matchGroupTable(lhs, lhsCount, lhsGroupColor); + const rhsTable = matchGroupTable( + rhs, + rhsCount, + rhsGroupColor, + rhsColorOffset + ); + + const resultTable = ( + + + + {noLHSMatches || noRHSMatches ? ( + + + dropped + + + ) : error !== null ? ( + + + error, result omitted + + + ) : ( + <> + {result.map(({ sample, manySideIdx }, resIdx) => { + if (sample.value === undefined) { + // TODO: Figure out how to handle native histograms. + throw new Error( + "Native histograms are not supported yet" + ); + } + + const filtered = + sample.value[1] === filteredSampleValue; + const [lIdx, rIdx] = + matching.card === + vectorMatchCardinality.oneToMany + ? [0, manySideIdx] + : [manySideIdx, 0]; + + return ( + + + + + {seriesSwatch(colorForIndex(lIdx))} + + {seriesSwatch( + colorForIndex(rIdx, rhsColorOffset) + )} + + + + + + {showSampleValues && ( + + {filtered ? ( + + filtered + + ) : ( + {sample.value[1]} + )} + + )} + + ); + })} + + )} + +
    +
    + ); + + return ( + + {mgIdx !== 0 && } + + + {error && ( + } + > + {explainError(node, mg, error)} + + )} + + + + + {lhsTable} + + + {node.op} + {node.bool && " bool"} + + + {rhsTable} + + = + + {resultTable} + + + + ); + })} + + + + )} + + ); +}; + +export default VectorVectorBinaryExprExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.module.css b/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.module.css new file mode 100644 index 0000000000..7cb36c7624 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.module.css @@ -0,0 +1,8 @@ +.funcDoc code { + background-color: light-dark( + var(--mantine-color-gray-1), + var(--mantine-color-dark-5) + ); + padding: 0.05em 0.2em; + border-radius: 0.2em; +} diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx new file mode 100644 index 0000000000..907830757d --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx @@ -0,0 +1,198 @@ +import { FC } from "react"; +import { Alert, Text, Anchor, Card, Divider } from "@mantine/core"; +import ASTNode, { nodeType } from "../../../promql/ast"; +// import AggregationExplainView from "./Aggregation"; +// import BinaryExprExplainView from "./BinaryExpr/BinaryExpr"; +// import SelectorExplainView from "./Selector"; +import funcDocs from "../../../promql/functionDocs"; +import { escapeString } from "../../../promql/utils"; +import { formatPrometheusDuration } from "../../../lib/formatTime"; +import classes from "./ExplainView.module.css"; +import SelectorExplainView from "./Selector"; +import AggregationExplainView from "./Aggregation"; +import BinaryExprExplainView from "./BinaryExpr/BinaryExpr"; +interface ExplainViewProps { + node: ASTNode | null; + treeShown: boolean; + setShowTree: () => void; +} + +const ExplainView: FC = ({ + node, + treeShown, + setShowTree, +}) => { + if (node === null) { + 
return ( + + <> + To use the Explain view,{" "} + {!treeShown && ( + <> + + enable the query tree view + {" "} + (also available via the expression input dropdown) and then + + )}{" "} + select a node in the tree above. + + + ); + } + + switch (node.type) { + case nodeType.aggregation: + return ; + case nodeType.binaryExpr: + return ; + case nodeType.call: + return ( + + + Function call + + + This node calls the{" "} + + + {node.func.name}() + + {" "} + function{node.args.length > 0 ? " on the provided inputs" : ""}. + + + {/* TODO: Some docs, like x_over_time, have relative links pointing back to the Prometheus docs, + make sure to modify those links in the docs extraction so they work from the explain view */} + + {funcDocs[node.func.name]} + + + ); + case nodeType.matrixSelector: + return ; + case nodeType.subquery: + return ( + + + Subquery + + + This node evaluates the passed expression as a subquery over the + last{" "} + + {formatPrometheusDuration(node.range)} + {" "} + at a query resolution{" "} + {node.step > 0 ? ( + <> + of{" "} + + {formatPrometheusDuration(node.step)} + + + ) : ( + "equal to the default rule evaluation interval" + )} + {node.timestamp !== null ? ( + <> + , evaluated relative to an absolute evaluation timestamp of{" "} + + {(node.timestamp / 1000).toFixed(3)} + + + ) : node.startOrEnd !== null ? ( + <> + , evaluated relative to the {node.startOrEnd} of the query range + + ) : ( + <> + )} + {node.offset === 0 ? ( + <> + ) : node.offset > 0 ? ( + <> + , time-shifted{" "} + + {formatPrometheusDuration(node.offset)} + {" "} + into the past + + ) : ( + <> + , time-shifted{" "} + + {formatPrometheusDuration(-node.offset)} + {" "} + into the future + + )} + . + + + ); + case nodeType.numberLiteral: + return ( + + + Number literal + + + A scalar number literal with the value{" "} + {node.val}. + + + ); + case nodeType.parenExpr: + return ( + + + Parentheses + + + Parentheses that contain a sub-expression to be evaluated. 
+ + + ); + case nodeType.stringLiteral: + return ( + + + String literal + + + A string literal with the value{" "} + + "{escapeString(node.val)}" + + . + + + ); + case nodeType.unaryExpr: + return ( + + + Unary expression + + + A unary expression that{" "} + {node.op === "+" + ? "does not affect the expression it is applied to" + : "changes the sign of the expression it is applied to"} + . + + + ); + case nodeType.vectorSelector: + return ; + default: + throw new Error("invalid node type"); + } +}; + +export default ExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx new file mode 100644 index 0000000000..26f62fb335 --- /dev/null +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx @@ -0,0 +1,230 @@ +import { FC, ReactNode } from "react"; +import { + VectorSelector, + MatrixSelector, + nodeType, + LabelMatcher, + matchType, +} from "../../../promql/ast"; +import { escapeString } from "../../../promql/utils"; +import { useSuspenseAPIQuery } from "../../../api/api"; +import { Card, Text, Divider, List } from "@mantine/core"; +import { MetadataResult } from "../../../api/responseTypes/metadata"; +import { formatPrometheusDuration } from "../../../lib/formatTime"; + +interface SelectorExplainViewProps { + node: VectorSelector | MatrixSelector; +} + +const matchingCriteriaList = ( + name: string, + matchers: LabelMatcher[] +): ReactNode => { + return ( + + {name.length > 0 && ( + + The metric name is{" "} + {name}. + + )} + {matchers + .filter((m) => !(m.name === "__name__")) + .map((m) => { + switch (m.type) { + case matchType.equal: + return ( + + + {m.name} + + {m.type} + + "{escapeString(m.value)}" + + : The label{" "} + + {m.name} + {" "} + is exactly{" "} + + "{escapeString(m.value)}" + + . 
+ + ); + case matchType.notEqual: + return ( + + + {m.name} + + {m.type} + + "{escapeString(m.value)}" + + : The label{" "} + + {m.name} + {" "} + is not{" "} + + "{escapeString(m.value)}" + + . + + ); + case matchType.matchRegexp: + return ( + + + {m.name} + + {m.type} + + "{escapeString(m.value)}" + + : The label{" "} + + {m.name} + {" "} + matches the regular expression{" "} + + "{escapeString(m.value)}" + + . + + ); + case matchType.matchNotRegexp: + return ( + + + {m.name} + + {m.type} + + "{escapeString(m.value)}" + + : The label{" "} + + {m.name} + {" "} + does not match the regular expression{" "} + + "{escapeString(m.value)}" + + . + + ); + default: + throw new Error("invalid matcher type"); + } + })} + + ); +}; + +const SelectorExplainView: FC = ({ node }) => { + const baseMetricName = node.name.replace(/(_count|_sum|_bucket)$/, ""); + const { data: metricMeta } = useSuspenseAPIQuery({ + path: `/metadata`, + params: { + metric: baseMetricName, + }, + }); + + return ( + + + {node.type === nodeType.vectorSelector ? "Instant" : "Range"} vector + selector + + + {metricMeta.data === undefined || + metricMeta.data[baseMetricName] === undefined || + metricMeta.data[baseMetricName].length < 1 ? ( + <>No metric metadata found. + ) : ( + <> + Metric help:{" "} + {metricMeta.data[baseMetricName][0].help} +
    + Metric type:{" "} + {metricMeta.data[baseMetricName][0].type} + + )} +
    + + + {node.type === nodeType.vectorSelector ? ( + <> + This node selects the latest (non-stale) sample value within the + last 5m + + ) : ( + <> + This node selects{" "} + + {formatPrometheusDuration(node.range)} + {" "} + of data going backward from the evaluation timestamp + + )} + {node.timestamp !== null ? ( + <> + , evaluated relative to an absolute evaluation timestamp of{" "} + + {(node.timestamp / 1000).toFixed(3)} + + + ) : node.startOrEnd !== null ? ( + <>, evaluated relative to the {node.startOrEnd} of the query range + ) : ( + <> + )} + {node.offset === 0 ? ( + <> + ) : node.offset > 0 ? ( + <> + , time-shifted{" "} + + {formatPrometheusDuration(node.offset)} + {" "} + into the past, + + ) : ( + <> + , time-shifted{" "} + + {formatPrometheusDuration(-node.offset)} + {" "} + into the future, + + )}{" "} + for any series that match all of the following criteria: + + {matchingCriteriaList(node.name, node.matchers)} + + If a series has no values in the last{" "} + + {node.type === nodeType.vectorSelector + ? "5m" + : formatPrometheusDuration(node.range)} + + {node.offset > 0 && ( + <> + {" "} + (relative to the time-shifted instant{" "} + + {formatPrometheusDuration(node.offset)} + {" "} + in the past) + + )} + , the series will not be returned. + +
    + ); +}; + +export default SelectorExplainView; diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx index 628183bd6a..8cddd601a4 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx @@ -14,6 +14,7 @@ import { IconChartLine, IconCheckbox, IconGraph, + IconInfoCircle, IconSquare, IconTable, } from "@tabler/icons-react"; @@ -38,6 +39,7 @@ import TreeView from "./TreeView"; import ErrorBoundary from "../../components/ErrorBoundary"; import ASTNode from "../../promql/ast"; import serializeNode from "../../promql/serialize"; +import ExplainView from "./ExplainViews/ExplainView"; export interface PanelProps { idx: number; @@ -153,6 +155,12 @@ const QueryPanel: FC = ({ idx, metricNames }) => { }> Graph + } + > + Explain + @@ -300,6 +308,30 @@ const QueryPanel: FC = ({ idx, metricNames }) => { onSelectRange={onSelectRange} /> + + + + {Array.from(Array(20), (_, i) => ( + + ))} + + } + > + { + dispatch(setShowTree({ idx, showTree: true })); + }} + /> + + + ); diff --git a/web/ui/mantine-ui/src/pages/query/SeriesName.tsx b/web/ui/mantine-ui/src/pages/query/SeriesName.tsx index 4fc3a7df60..66a7856f56 100644 --- a/web/ui/mantine-ui/src/pages/query/SeriesName.tsx +++ b/web/ui/mantine-ui/src/pages/query/SeriesName.tsx @@ -51,14 +51,14 @@ const SeriesName: FC = ({ labels, format }) => { } return ( -
    + {labels ? labels.__name__ : ""} {"{"} {labelNodes} {"}"} -
    + ); }; diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index b6a78e9a69..102396f6e4 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -88,6 +88,13 @@ const TreeNode: FC<{ sortedLabelCards: [], }); + // Deselect node when node is unmounted. + useEffect(() => { + return () => { + setSelectedNode(null); + }; + }, [setSelectedNode]); + const children = getNodeChildren(node); const [childStates, setChildStates] = useState( @@ -236,12 +243,12 @@ const TreeNode: FC<{ // Connector line between this node and its parent. )} - {/* The node itself. */} + {/* The node (visible box) itself. */} + - +
    ); }; diff --git a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts index 86b85b3c8a..f948205a12 100644 --- a/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts +++ b/web/ui/mantine-ui/src/pages/query/urlStateEncoding.ts @@ -39,7 +39,22 @@ export const decodePanelOptionsFromURLParams = (query: string): Panel[] => { panel.showTree = value === "1"; }); decodeSetting("tab", (value) => { - panel.visualizer.activeTab = value === "0" ? "graph" : "table"; + // Numeric values are deprecated (from the old UI), but we still support decoding them. + switch (value) { + case "0": + case "graph": + panel.visualizer.activeTab = "graph"; + break; + case "1": + case "table": + panel.visualizer.activeTab = "table"; + break; + case "explain": + panel.visualizer.activeTab = "explain"; + break; + default: + console.log("Unknown tab", value); + } }); decodeSetting("display_mode", (value) => { panel.visualizer.displayMode = value as GraphDisplayMode; @@ -125,7 +140,7 @@ export const encodePanelOptionsToURLParams = ( panels.forEach((p, idx) => { addParam(idx, "expr", p.expr); addParam(idx, "show_tree", p.showTree ? "1" : "0"); - addParam(idx, "tab", p.visualizer.activeTab === "graph" ? 
"0" : "1"); + addParam(idx, "tab", p.visualizer.activeTab); if (p.visualizer.endTime !== null) { addParam(idx, "end_input", formatTime(p.visualizer.endTime)); addParam(idx, "moment_input", formatTime(p.visualizer.endTime)); From ae131e7d866d46277721680eb9474b2394be1205 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 20:48:55 +0200 Subject: [PATCH 0472/1397] Install prettier deps for modules and old React app Signed-off-by: Julius Volz --- web/ui/package-lock.json | 16 +++++++++++++++- web/ui/package.json | 2 ++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 56e34e2ded..e384d92ed9 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -15,6 +15,8 @@ "@types/jest": "^29.5.12", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", + "eslint-config-prettier": "^9.1.0", + "prettier": "^3.3.3", "ts-jest": "^29.2.2", "typescript": "^5.2.2", "vite": "^5.1.0" @@ -4662,6 +4664,19 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/eslint-config-prettier": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.1.0.tgz", + "integrity": "sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==", + "dev": true, + "license": "MIT", + "bin": { + "eslint-config-prettier": "bin/cli.js" + }, + "peerDependencies": { + "eslint": ">=7.0.0" + } + }, "node_modules/eslint-plugin-prettier": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz", @@ -7834,7 +7849,6 @@ "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", "dev": true, "license": "MIT", - "peer": true, "bin": { "prettier": "bin/prettier.cjs" }, diff --git a/web/ui/package.json b/web/ui/package.json index ff5493e436..f1cd0b90de 100644 --- 
a/web/ui/package.json +++ b/web/ui/package.json @@ -18,6 +18,8 @@ "@types/jest": "^29.5.12", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", + "eslint-config-prettier": "^9.1.0", + "prettier": "^3.3.3", "ts-jest": "^29.2.2", "typescript": "^5.2.2", "vite": "^5.1.0" From ee4a06be37324ca6c012c61958e2df7df2926169 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 20:59:21 +0200 Subject: [PATCH 0473/1397] Make new UI tools Go version the same as rest of Prometheus Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/promql/tools/go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod index 0d0b46e547..d8f4634199 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.mod +++ b/web/ui/mantine-ui/src/promql/tools/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools -go 1.23.0 +go 1.21.0 require ( github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc From 5c88aa4fea092d45ec25d2d7e2a18519ad73d0e2 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 21:22:26 +0200 Subject: [PATCH 0474/1397] Add commented-out Prometheus demo server as alternative to vite proxy config Signed-off-by: Julius Volz --- web/ui/mantine-ui/vite.config.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/web/ui/mantine-ui/vite.config.ts b/web/ui/mantine-ui/vite.config.ts index ca52ca1672..079605403d 100644 --- a/web/ui/mantine-ui/vite.config.ts +++ b/web/ui/mantine-ui/vite.config.ts @@ -12,6 +12,14 @@ export default defineConfig({ "/-/": { target: "http://localhost:9090", }, + // "/api": { + // target: "https://prometheus.demo.do.prometheus.io/", + // changeOrigin: true, + // }, + // "/-/": { + // target: "https://prometheus.demo.do.prometheus.io/", + // changeOrigin: true, + // }, }, }, }); From 9b27e0d9155e46459cd3233055da3795b716c459 Mon Sep 17 00:00:00 2001 From: Julius Volz 
Date: Fri, 6 Sep 2024 21:40:29 +0200 Subject: [PATCH 0475/1397] Only switch dir temporarily in build_ui.sh Signed-off-by: Julius Volz --- web/ui/build_ui.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/web/ui/build_ui.sh b/web/ui/build_ui.sh index 62568aaa30..7046ca0596 100644 --- a/web/ui/build_ui.sh +++ b/web/ui/build_ui.sh @@ -31,9 +31,7 @@ function buildModule() { function buildReactApp() { echo "build react-app" - cd react-app - npm run build - cd .. + (cd react-app && npm run build) rm -rf ./static/react-app mv ./react-app/build ./static/react-app } From cb6613197452fd3cbeba5f71decd192837eae989 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 21:45:39 +0200 Subject: [PATCH 0476/1397] Rename and restructure AST-to-JSON file Signed-off-by: Julius Volz --- web/api/v1/{parse_expr.go => ast_to_json.go} | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) rename web/api/v1/{parse_expr.go => ast_to_json.go} (96%) diff --git a/web/api/v1/parse_expr.go b/web/api/v1/ast_to_json.go similarity index 96% rename from web/api/v1/parse_expr.go rename to web/api/v1/ast_to_json.go index ed14b6829a..c5f0d08dcc 100644 --- a/web/api/v1/parse_expr.go +++ b/web/api/v1/ast_to_json.go @@ -20,14 +20,8 @@ import ( "github.com/prometheus/prometheus/promql/parser" ) -func getStartOrEnd(startOrEnd parser.ItemType) interface{} { - if startOrEnd == 0 { - return nil - } - - return startOrEnd.String() -} - +// Take a Go PromQL AST and translate it to a JSON object for the tree view in the UI. +// TODO: Could it make sense to do this via the normal JSON marshalling methods? 
func translateAST(node parser.Expr) interface{} { if node == nil { return nil @@ -151,3 +145,11 @@ func translateMatchers(in []*labels.Matcher) interface{} { } return out } + +func getStartOrEnd(startOrEnd parser.ItemType) interface{} { + if startOrEnd == 0 { + return nil + } + + return startOrEnd.String() +} From 392e0a655475eea83fb71e822aad94dc86dbe86c Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 21:47:35 +0200 Subject: [PATCH 0477/1397] Signed-off-by: Julius Volz --- web/api/v1/{ast_to_json.go => translate_ast.go} | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) rename web/api/v1/{ast_to_json.go => translate_ast.go} (96%) diff --git a/web/api/v1/ast_to_json.go b/web/api/v1/translate_ast.go similarity index 96% rename from web/api/v1/ast_to_json.go rename to web/api/v1/translate_ast.go index c5f0d08dcc..afa11f16b9 100644 --- a/web/api/v1/ast_to_json.go +++ b/web/api/v1/translate_ast.go @@ -20,8 +20,10 @@ import ( "github.com/prometheus/prometheus/promql/parser" ) -// Take a Go PromQL AST and translate it to a JSON object for the tree view in the UI. -// TODO: Could it make sense to do this via the normal JSON marshalling methods? +// Take a Go PromQL AST and translate it to an object that's nicely JSON-serializable +// for the tree view in the UI. +// TODO: Could it make sense to do this via the normal JSON marshalling methods? Maybe +// too UI-specific though. 
func translateAST(node parser.Expr) interface{} { if node == nil { return nil From 89bfe813a9e795a24907f93301d4dc456ef1aed0 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 22:08:09 +0200 Subject: [PATCH 0478/1397] Update UI tools Go version again to match rest of Prometheus Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/promql/tools/go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod index d8f4634199..aac6e019aa 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.mod +++ b/web/ui/mantine-ui/src/promql/tools/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools -go 1.21.0 +go 1.22.0 require ( github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc From 0c5e4ef8032e12b1a7121b05b876d3c3db35512b Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 6 Sep 2024 23:29:02 +0200 Subject: [PATCH 0479/1397] git-ignore the correct build output dir for the old UI Signed-off-by: Julius Volz --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f4ed7242ba..63c65b8e61 100644 --- a/.gitignore +++ b/.gitignore @@ -22,7 +22,7 @@ benchmark.txt /documentation/examples/remote_storage/example_write_adapter/example_write_adapter npm_licenses.tar.bz2 -/web/ui/static/react-app +/web/ui/static/react /web/ui/static/mantine-ui /vendor From d9fdb50a61d5e1cd7b360d85f1097c52a8242bd6 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sat, 7 Sep 2024 18:00:35 +0200 Subject: [PATCH 0480/1397] Remove file that was erroneously added to old UI Signed-off-by: Julius Volz --- .../src/pages/graph/GraphHistogramHelpers.ts | 62 ------------------- 1 file changed, 62 deletions(-) delete mode 100644 web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts diff --git a/web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts 
b/web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts deleted file mode 100644 index 4481f8f965..0000000000 --- a/web/ui/react-app/src/pages/graph/GraphHistogramHelpers.ts +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Inspired by a similar feature in VictoriaMetrics. - * See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3384 for more details. - * Developed by VictoriaMetrics team. - */ - -import { GraphExemplar, GraphProps, GraphSeries } from './Graph'; - -export function isHistogramData(data: GraphProps['data']) { - if (!data?.result?.length) return false; - const result = data.result; - if (result.length < 2) return false; - const histogramLabels = ['le']; - - const firstLabels = Object.keys(result[0].metric).filter((n) => !histogramLabels.includes(n)); - const isHistogram = result.every((r) => { - const labels = Object.keys(r.metric).filter((n) => !histogramLabels.includes(n)); - return firstLabels.length === labels.length && labels.every((l) => r.metric[l] === result[0].metric[l]); - }); - - return isHistogram && result.every((r) => histogramLabels.some((l) => l in r.metric)); -} - -export function prepareHistogramData(buckets: GraphSeries[]) { - if (!buckets.every((a) => a.labels.le)) return buckets; - - const sortedBuckets = buckets.sort((a, b) => promValueToNumber(a.labels.le) - promValueToNumber(b.labels.le)); - const result: GraphSeries[] = []; - - for (let i = 0; i < sortedBuckets.length; i++) { - const values = []; - const { data, labels, color } = sortedBuckets[i]; - - for (const [timestamp, value] of data) { - const prevVal = sortedBuckets[i - 1]?.data.find((v) => v[0] === timestamp)?.[1] || 0; - const newVal = Number(value) - +prevVal; - values.push([Number(timestamp), newVal]); - } - - result.push({ - data: values, - labels, - color, - index: i, - }); - } - return result; -} - -export function promValueToNumber(s: string) { - switch (s) { - case 'NaN': - return NaN; - case 'Inf': - case '+Inf': - return Infinity; - case '-Inf': - return 
-Infinity; - default: - return parseFloat(s); - } -} From a084a05897b9a65ce38426bfa1edd32aba9a3db7 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sat, 7 Sep 2024 18:39:45 +0200 Subject: [PATCH 0481/1397] Remove package-lock.json from root of repo Signed-off-by: Julius Volz --- package-lock.json | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 package-lock.json diff --git a/package-lock.json b/package-lock.json deleted file mode 100644 index 9940a51863..0000000000 --- a/package-lock.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "prometheus", - "lockfileVersion": 3, - "requires": true, - "packages": {} -} From db5e48dc3391807b41f2329c90e03c2979318e19 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sun, 8 Sep 2024 14:39:13 +0200 Subject: [PATCH 0482/1397] promql.Engine.Close: No-op if nil (#14861) Signed-off-by: Arve Knudsen --- promql/engine.go | 4 ++++ promql/engine_test.go | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index dd855c6d2d..e55f154d23 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -435,6 +435,10 @@ func NewEngine(opts EngineOpts) *Engine { // Close closes ng. 
func (ng *Engine) Close() error { + if ng == nil { + return nil + } + if ng.activeQueryTracker != nil { return ng.activeQueryTracker.Close() } diff --git a/promql/engine_test.go b/promql/engine_test.go index 947c0e1ed8..db399d8656 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3019,6 +3019,29 @@ func TestEngineOptsValidation(t *testing.T) { } } +func TestEngine_Close(t *testing.T) { + t.Run("nil engine", func(t *testing.T) { + var ng *promql.Engine + require.NoError(t, ng.Close()) + }) + + t.Run("non-nil engine", func(t *testing.T) { + ng := promql.NewEngine(promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 0, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: nil, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: false, + LookbackDelta: 0, + EnableDelayedNameRemoval: true, + }) + require.NoError(t, ng.Close()) + }) +} + func TestInstantQueryWithRangeVectorSelector(t *testing.T) { engine := newTestEngine(t) From 7e0cd2e0b4d66dd76e879e1ba434dae7ea24657d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 15:16:02 +0200 Subject: [PATCH 0483/1397] Improve binop explain view styling Signed-off-by: Julius Volz --- .../query/ExplainViews/BinaryExpr/VectorVector.tsx | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx index 0b0854409e..1ba2d0eb56 100644 --- a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx @@ -457,7 +457,7 @@ const VectorVectorBinaryExprExplainView: FC< - +
    {series.length === 0 ? ( @@ -501,7 +501,7 @@ const VectorVectorBinaryExprExplainView: FC< {showSampleValues && ( - {s.value[1]} + {s.value[1]} )} ); @@ -549,7 +549,7 @@ const VectorVectorBinaryExprExplainView: FC< border: `2px solid ${resultGroupColor}`, }} > -
    +
    {noLHSMatches || noRHSMatches ? ( @@ -603,7 +603,11 @@ const VectorVectorBinaryExprExplainView: FC< : undefined } > - + {seriesSwatch(colorForIndex(lIdx))} @@ -619,7 +623,7 @@ const VectorVectorBinaryExprExplainView: FC< {showSampleValues && ( - + {filtered ? ( filtered From 8b4291537bc081ea0fce1629aab58864a577b6e0 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 15:17:00 +0200 Subject: [PATCH 0484/1397] Clarify explain view, add tree view close button, fix callback bug Signed-off-by: Julius Volz --- .../pages/query/ExplainViews/ExplainView.tsx | 33 +++++----- .../mantine-ui/src/pages/query/QueryPanel.tsx | 19 +++--- .../mantine-ui/src/pages/query/TreeNode.tsx | 60 +++++++++---------- .../mantine-ui/src/pages/query/TreeView.tsx | 15 ++++- 4 files changed, 69 insertions(+), 58 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx index 907830757d..e089c7851a 100644 --- a/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/ExplainView.tsx @@ -11,32 +11,35 @@ import classes from "./ExplainView.module.css"; import SelectorExplainView from "./Selector"; import AggregationExplainView from "./Aggregation"; import BinaryExprExplainView from "./BinaryExpr/BinaryExpr"; +import { IconInfoCircle } from "@tabler/icons-react"; interface ExplainViewProps { node: ASTNode | null; treeShown: boolean; - setShowTree: () => void; + showTree: () => void; } const ExplainView: FC = ({ node, treeShown, - setShowTree, + showTree: setShowTree, }) => { if (node === null) { return ( - - <> - To use the Explain view,{" "} - {!treeShown && ( - <> - - enable the query tree view - {" "} - (also available via the expression input dropdown) and then - - )}{" "} - select a node in the tree above. - + }> + This tab can help you understand the behavior of individual components + of a query. +
    +
    + To use the Explain view,{" "} + {!treeShown && ( + <> + + enable the query tree view + {" "} + (also available via the expression input menu) and then + + )}{" "} + select a node in the tree above.
    ); } diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx index 8cddd601a4..09deb95e38 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx @@ -6,16 +6,13 @@ import { Box, SegmentedControl, Stack, - Button, Skeleton, } from "@mantine/core"; import { IconChartAreaFilled, IconChartLine, - IconCheckbox, IconGraph, IconInfoCircle, - IconSquare, IconTable, } from "@tabler/icons-react"; import { FC, Suspense, useCallback, useMemo, useState } from "react"; @@ -129,6 +126,10 @@ const QueryPanel: FC = ({ idx, metricNames }) => { retriggerIdx={retriggerIdx} selectedNode={selectedNode} setSelectedNode={setSelectedNode} + closeTreeView={() => { + dispatch(setShowTree({ idx, showTree: false })); + setSelectedNode(null); + }} /> @@ -165,11 +166,7 @@ const QueryPanel: FC = ({ idx, metricNames }) => { - + = ({ idx, metricNames }) => { - + */} @@ -325,7 +322,7 @@ const QueryPanel: FC = ({ idx, metricNames }) => { { + showTree={() => { dispatch(setShowTree({ idx, showTree: true })); }} /> diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index 102396f6e4..8ac176e58b 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -1,5 +1,6 @@ import { FC, + useCallback, useEffect, useLayoutEffect, useMemo, @@ -57,8 +58,10 @@ const TreeNode: FC<{ selectedNode: { id: string; node: ASTNode } | null; setSelectedNode: (Node: { id: string; node: ASTNode } | null) => void; parentRef?: React.RefObject; - reportNodeState?: (state: NodeState) => void; + reportNodeState?: (childIdx: number, state: NodeState) => void; reverse: boolean; + // The index of this node in its parent's children. 
+ childIdx: number; }> = ({ node, selectedNode, @@ -66,6 +69,7 @@ const TreeNode: FC<{ parentRef, reportNodeState, reverse, + childIdx, }) => { const nodeID = useId(); const nodeRef = useRef(null); @@ -130,21 +134,32 @@ const TreeNode: FC<{ useEffect(() => { if (mergedChildState === "error") { - reportNodeState && reportNodeState("error"); + reportNodeState && reportNodeState(childIdx, "error"); } - }, [mergedChildState, reportNodeState]); + }, [mergedChildState, reportNodeState, childIdx]); useEffect(() => { if (error) { - reportNodeState && reportNodeState("error"); + reportNodeState && reportNodeState(childIdx, "error"); } - }, [error, reportNodeState]); + }, [error, reportNodeState, childIdx]); useEffect(() => { if (isFetching) { - reportNodeState && reportNodeState("running"); + reportNodeState && reportNodeState(childIdx, "running"); } - }, [isFetching, reportNodeState]); + }, [isFetching, reportNodeState, childIdx]); + + const childReportNodeState = useCallback( + (childIdx: number, state: NodeState) => { + setChildStates((prev) => { + const newStates = [...prev]; + newStates[childIdx] = state; + return newStates; + }); + }, + [setChildStates] + ); // Update the size and position of tree connector lines based on the node's and its parent's position. 
useLayoutEffect(() => { @@ -185,7 +200,7 @@ const TreeNode: FC<{ return; } - reportNodeState && reportNodeState("success"); + reportNodeState && reportNodeState(childIdx, "success"); let resultSeries = 0; const labelValuesByName: Record> = {}; @@ -228,7 +243,7 @@ const TreeNode: FC<{ ), labelExamples, }); - }, [data, reportNodeState]); + }, [data, reportNodeState, childIdx]); const innerNode = ( { - setChildStates((prev) => { - const newStates = [...prev]; - newStates[0] = state; - return newStates; - }); - }} + childIdx={0} + reportNodeState={childReportNodeState} /> {innerNode} @@ -384,13 +394,8 @@ const TreeNode: FC<{ setSelectedNode={setSelectedNode} parentRef={nodeRef} reverse={false} - reportNodeState={(state: NodeState) => { - setChildStates((prev) => { - const newStates = [...prev]; - newStates[1] = state; - return newStates; - }); - }} + childIdx={1} + reportNodeState={childReportNodeState} /> @@ -407,13 +412,8 @@ const TreeNode: FC<{ setSelectedNode={setSelectedNode} parentRef={nodeRef} reverse={false} - reportNodeState={(state: NodeState) => { - setChildStates((prev) => { - const newStates = [...prev]; - newStates[idx] = state; - return newStates; - }); - }} + childIdx={idx} + reportNodeState={childReportNodeState} /> ))} diff --git a/web/ui/mantine-ui/src/pages/query/TreeView.tsx b/web/ui/mantine-ui/src/pages/query/TreeView.tsx index 01bfcd7503..1455e43774 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeView.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeView.tsx @@ -3,7 +3,7 @@ import { useSuspenseAPIQuery } from "../../api/api"; import { useAppSelector } from "../../state/hooks"; import ASTNode from "../../promql/ast"; import TreeNode from "./TreeNode"; -import { Card } from "@mantine/core"; +import { Card, CloseButton } from "@mantine/core"; const TreeView: FC<{ panelIdx: number; @@ -19,7 +19,8 @@ const TreeView: FC<{ node: ASTNode; } | null ) => void; -}> = ({ panelIdx, selectedNode, setSelectedNode }) => { + closeTreeView: () => void; +}> = ({ 
panelIdx, selectedNode, setSelectedNode, closeTreeView }) => { const { expr } = useAppSelector((state) => state.queryPage.panels[panelIdx]); const { data } = useSuspenseAPIQuery({ @@ -32,7 +33,17 @@ const TreeView: FC<{ return ( + Date: Sun, 8 Sep 2024 15:17:49 +0200 Subject: [PATCH 0485/1397] Update mantine-ui package version to latest release Signed-off-by: Julius Volz --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 7dfdf80b4c..da686fdc9b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.49.1", + "version": "0.54.1", "type": "module", "scripts": { "start": "vite", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index e384d92ed9..5ca2c85858 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.49.1", + "version": "0.54.1", "dependencies": { "@codemirror/autocomplete": "^6.12.0", "@codemirror/language": "^6.10.1", From 52a6e32fd08a33bfa186903bc1113538a9856269 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 17:05:34 +0200 Subject: [PATCH 0486/1397] Update all CodeMirror dependencies Signed-off-by: Julius Volz --- web/ui/mantine-ui/package.json | 16 ++++----- web/ui/package-lock.json | 61 +++++++++------------------------- 2 files changed, 23 insertions(+), 54 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index da686fdc9b..0cb3fdaafa 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -12,14 +12,14 @@ "test": "vitest" }, "dependencies": { - "@codemirror/autocomplete": "^6.12.0", - "@codemirror/language": "^6.10.1", - "@codemirror/lint": "^6.5.0", - "@codemirror/state": "^6.4.0", - 
"@codemirror/view": "^6.24.0", + "@codemirror/autocomplete": "^6.18.0", + "@codemirror/language": "^6.10.2", + "@codemirror/lint": "^6.8.1", + "@codemirror/state": "^6.4.1", + "@codemirror/view": "^6.33.0", "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", - "@lezer/highlight": "^1.2.0", + "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.11.2", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.11.2", @@ -27,7 +27,7 @@ "@mantine/notifications": "^7.11.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "^0.50.0-rc.1", + "@prometheus-io/codemirror-promql": "^0.54.1", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", "@tanstack/react-query": "^5.22.2", @@ -35,7 +35,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.7", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.21.22", + "@uiw/react-codemirror": "^4.23.1", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5ca2c85858..b3b85e6a6a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -26,14 +26,14 @@ "name": "@prometheus-io/mantine-ui", "version": "0.54.1", "dependencies": { - "@codemirror/autocomplete": "^6.12.0", - "@codemirror/language": "^6.10.1", - "@codemirror/lint": "^6.5.0", - "@codemirror/state": "^6.4.0", - "@codemirror/view": "^6.24.0", + "@codemirror/autocomplete": "^6.18.0", + "@codemirror/language": "^6.10.2", + "@codemirror/lint": "^6.8.1", + "@codemirror/state": "^6.4.1", + "@codemirror/view": "^6.33.0", "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", - "@lezer/highlight": "^1.2.0", + "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.11.2", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.11.2", @@ -41,7 +41,7 @@ "@mantine/notifications": "^7.11.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "^0.50.0-rc.1", + 
"@prometheus-io/codemirror-promql": "^0.54.1", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", "@tanstack/react-query": "^5.22.2", @@ -49,7 +49,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.7", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.21.22", + "@uiw/react-codemirror": "^4.23.1", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", @@ -84,37 +84,6 @@ "vitest": "^2.0.5" } }, - "mantine-ui/node_modules/@prometheus-io/codemirror-promql": { - "version": "0.50.1", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.50.1.tgz", - "integrity": "sha512-2TabgkxM3m8VbFv8eueCMXAQQyNgd0UU4edBXHkNkTKxpwOXQZ4wEc++1rRqOw+eLPOrTX8sXjbUFi4kp1vZXw==", - "license": "Apache-2.0", - "dependencies": { - "@prometheus-io/lezer-promql": "0.50.1", - "lru-cache": "^7.18.3" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@codemirror/autocomplete": "^6.4.0", - "@codemirror/language": "^6.3.0", - "@codemirror/lint": "^6.0.0", - "@codemirror/state": "^6.1.1", - "@codemirror/view": "^6.4.0", - "@lezer/common": "^1.0.1" - } - }, - "mantine-ui/node_modules/@prometheus-io/lezer-promql": { - "version": "0.50.1", - "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.50.1.tgz", - "integrity": "sha512-DzcskmyZn4k0dv+vk7W6aFCYnXJQszpwrqoS29Sa+rHsRMmoLAELcFDMWhxfkM9UqIb75/382rqMBQya8l018Q==", - "license": "Apache-2.0", - "peerDependencies": { - "@lezer/highlight": "^1.1.2", - "@lezer/lr": "^1.2.3" - } - }, "mantine-ui/node_modules/eslint": { "version": "9.9.1", "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.9.1.tgz", @@ -3330,9 +3299,9 @@ } }, "node_modules/@uiw/codemirror-extensions-basic-setup": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.0.tgz", - "integrity": 
"sha512-+k5nkRpUWGaHr1JWT8jcKsVewlXw5qBgSopm9LW8fZ6KnSNZBycz8kHxh0+WSvckmXEESGptkIsb7dlkmJT/hQ==", + "version": "4.23.1", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.1.tgz", + "integrity": "sha512-l/1iBZt3Ao9ElUvUvA0CI8bLcGw0kgV0976l1u3psYMfKYJl5TwSHn6JOeSt/iCq/13exp1f7u+zFMRwtzeinw==", "license": "MIT", "dependencies": { "@codemirror/autocomplete": "^6.0.0", @@ -3357,16 +3326,16 @@ } }, "node_modules/@uiw/react-codemirror": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.0.tgz", - "integrity": "sha512-MnqTXfgeLA3fsUUQjqjJgemEuNyoGALgsExVm0NQAllAAi1wfj+IoKFeK+h3XXMlTFRCFYOUh4AHDv0YXJLsOg==", + "version": "4.23.1", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.1.tgz", + "integrity": "sha512-OUrBY/7gvmiolgP4m9UlsGAzNce9YEzmDvPPAc+g27q+BZEJYeWQCzqtjtXfL7OkwQcZ0Aea2DuUUZRUTTIyxg==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.18.6", "@codemirror/commands": "^6.1.0", "@codemirror/state": "^6.1.1", "@codemirror/theme-one-dark": "^6.0.0", - "@uiw/codemirror-extensions-basic-setup": "4.23.0", + "@uiw/codemirror-extensions-basic-setup": "4.23.1", "codemirror": "^6.0.0" }, "funding": { From 4fc562f9e71b24ffc8c0dee72313fe42ee2b937a Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sun, 8 Sep 2024 17:13:40 +0200 Subject: [PATCH 0487/1397] OTLP: Support context cancellation/timeout during translation (#14612) * OTLP: Support context cancellation/timeout during translation --------- Signed-off-by: Arve Knudsen --- CHANGELOG.md | 1 + .../prometheusremotewrite/context.go | 37 +++++++++++ .../prometheusremotewrite/context_test.go | 40 ++++++++++++ .../prometheusremotewrite/helper.go | 37 ++++++++--- .../prometheusremotewrite/helper_test.go | 3 + .../prometheusremotewrite/histograms.go | 16 +++-- .../prometheusremotewrite/histograms_test.go | 2 + 
.../prometheusremotewrite/metrics_to_prw.go | 62 ++++++++++++++++--- .../metrics_to_prw_test.go | 38 +++++++++++- .../number_data_points.go | 28 +++++++-- .../number_data_points_test.go | 3 + storage/remote/write_handler.go | 2 +- 12 files changed, 239 insertions(+), 30 deletions(-) create mode 100644 storage/remote/otlptranslator/prometheusremotewrite/context.go create mode 100644 storage/remote/otlptranslator/prometheusremotewrite/context_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 37cbea6ef5..ff222790fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200 * [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 +* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 * [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 ## 2.54.1 / 2024-08-27 diff --git a/storage/remote/otlptranslator/prometheusremotewrite/context.go b/storage/remote/otlptranslator/prometheusremotewrite/context.go new file mode 100644 index 0000000000..5c6dd20f18 --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/context.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +package prometheusremotewrite + +import "context" + +// everyNTimes supports checking for context error every n times. +type everyNTimes struct { + n int + i int + err error +} + +// checkContext calls ctx.Err() every e.n times and returns an eventual error. +func (e *everyNTimes) checkContext(ctx context.Context) error { + if e.err != nil { + return e.err + } + + e.i++ + if e.i >= e.n { + e.i = 0 + e.err = ctx.Err() + } + + return e.err +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/context_test.go b/storage/remote/otlptranslator/prometheusremotewrite/context_test.go new file mode 100644 index 0000000000..94b23be04f --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/context_test.go @@ -0,0 +1,40 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package prometheusremotewrite + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEveryNTimes(t *testing.T) { + const n = 128 + ctx, cancel := context.WithCancel(context.Background()) + e := &everyNTimes{ + n: n, + } + + for i := 0; i < n; i++ { + require.NoError(t, e.checkContext(ctx)) + } + + cancel() + for i := 0; i < n-1; i++ { + require.NoError(t, e.checkContext(ctx)) + } + require.EqualError(t, e.checkContext(ctx), context.Canceled.Error()) + // e should remember the error. 
+ require.EqualError(t, e.checkContext(ctx), context.Canceled.Error()) +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 67cf28119d..fd7f58f073 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "encoding/hex" "fmt" "log" @@ -241,9 +242,13 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // with the user defined bucket boundaries of non-exponential OTel histograms. // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. -func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string) { +func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, + resource pcommon.Resource, settings Settings, baseName string) error { for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return err + } + pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) @@ -284,6 +289,10 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { + if err := c.everyN.checkContext(ctx); err != nil { + return err + } + bound := pt.ExplicitBounds().At(i) cumulativeCount += pt.BucketCounts().At(i) bucket := &prompb.Sample{ @@ -312,7 +321,9 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra 
ts := c.addSample(infBucket, infLabels) bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)}) - c.addExemplars(pt, bucketBounds) + if err := c.addExemplars(ctx, pt, bucketBounds); err != nil { + return err + } startTimestamp := pt.StartTimestamp() if settings.ExportCreatedMetric && startTimestamp != 0 { @@ -320,6 +331,8 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp()) } } + + return nil } type exemplarType interface { @@ -327,9 +340,13 @@ type exemplarType interface { Exemplars() pmetric.ExemplarSlice } -func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { +func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, pt T) ([]prompb.Exemplar, error) { promExemplars := make([]prompb.Exemplar, 0, pt.Exemplars().Len()) for i := 0; i < pt.Exemplars().Len(); i++ { + if err := everyN.checkContext(ctx); err != nil { + return nil, err + } + exemplar := pt.Exemplars().At(i) exemplarRunes := 0 @@ -379,7 +396,7 @@ func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { promExemplars = append(promExemplars, promExemplar) } - return promExemplars + return promExemplars, nil } // mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics @@ -417,9 +434,13 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { return ts } -func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string) { +func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, + settings Settings, baseName string) error { for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return err + } + pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) baseLabels := 
createAttributes(resource, pt.Attributes(), settings, nil, false) @@ -468,6 +489,8 @@ func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) } } + + return nil } // createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index e02ebbf5de..a48a57b062 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "testing" "time" @@ -280,6 +281,7 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { converter := NewPrometheusConverter() converter.addSummaryDataPoints( + context.Background(), metric.Summary().DataPoints(), pcommon.NewResource(), Settings{ @@ -390,6 +392,7 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { converter := NewPrometheusConverter() converter.addHistogramDataPoints( + context.Background(), metric.Histogram().DataPoints(), pcommon.NewResource(), Settings{ diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index ec93387fc6..8349d4f907 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "fmt" "math" @@ -33,10 +34,14 @@ const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. 
-func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, +func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, resource pcommon.Resource, settings Settings, promName string) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return annots, err + } + pt := dataPoints.At(x) histogram, ws, err := exponentialToNativeHistogram(pt) @@ -57,15 +62,18 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr ts, _ := c.getOrCreateTimeSeries(lbls) ts.Histograms = append(ts.Histograms, histogram) - exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt) + exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) + if err != nil { + return annots, err + } ts.Exemplars = append(ts.Exemplars, exemplars...) } return annots, nil } -// exponentialToNativeHistogram translates OTel Exponential Histogram data point -// to Prometheus Native Histogram. +// exponentialToNativeHistogram translates an OTel Exponential Histogram data point +// to a Prometheus Native Histogram. 
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index cd1c858ac1..e064ab28a2 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "fmt" "testing" "time" @@ -754,6 +755,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { converter := NewPrometheusConverter() annots, err := converter.addExponentialHistogramDataPoints( + context.Background(), metric.ExponentialHistogram().DataPoints(), pcommon.NewResource(), Settings{ diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 9d76800809..0afd2ad57e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "errors" "fmt" "sort" @@ -44,6 +45,7 @@ type Settings struct { type PrometheusConverter struct { unique map[uint64]*prompb.TimeSeries conflicts map[uint64][]*prompb.TimeSeries + everyN everyNTimes } func NewPrometheusConverter() *PrometheusConverter { @@ -54,7 +56,8 @@ func NewPrometheusConverter() *PrometheusConverter { } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. 
-func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) { +func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) { + c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) @@ -68,6 +71,11 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) // TODO: decide if instrumentation library information should be exported as labels for k := 0; k < metricSlice.Len(); k++ { + if err := c.everyN.checkContext(ctx); err != nil { + errs = multierr.Append(errs, err) + return + } + metric := metricSlice.At(k) mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) @@ -87,21 +95,36 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName) + if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } case pmetric.MetricTypeSum: dataPoints := metric.Sum().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) break } - c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName) + if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, metric, settings, promName); err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } case pmetric.MetricTypeHistogram: dataPoints := metric.Histogram().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - c.addHistogramDataPoints(dataPoints, resource, settings, promName) + if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } case pmetric.MetricTypeExponentialHistogram: dataPoints := metric.ExponentialHistogram().DataPoints() if dataPoints.Len() == 0 { @@ -109,20 +132,31 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) break } ws, err := c.addExponentialHistogramDataPoints( + ctx, dataPoints, resource, settings, promName, ) annots.Merge(ws) - errs = multierr.Append(errs, err) + if err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) break } - c.addSummaryDataPoints(dataPoints, resource, settings, promName) + if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } default: errs = multierr.Append(errs, errors.New("unsupported metric type")) } @@ -148,25 +182,33 @@ func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool { // addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value, // the exemplar is added to the bucket bound's time series, provided that the time series' has samples. -func (c *PrometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) { +func (c *PrometheusConverter) addExemplars(ctx context.Context, dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) error { if len(bucketBounds) == 0 { - return + return nil } - exemplars := getPromExemplars(dataPoint) + exemplars, err := getPromExemplars(ctx, &c.everyN, dataPoint) + if err != nil { + return err + } if len(exemplars) == 0 { - return + return nil } sort.Sort(byBucketBoundsData(bucketBounds)) for _, exemplar := range exemplars { for _, bound := range bucketBounds { + if err := c.everyN.checkContext(ctx); err != nil { + return err + } if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound { bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar) break } } } + + return nil } // addSample finds a TimeSeries that corresponds to lbls, and adds sample to it. 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index bdc1c9d0b2..641437632d 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "fmt" "testing" "time" @@ -28,6 +29,39 @@ import ( ) func TestFromMetrics(t *testing.T) { + t.Run("successful", func(t *testing.T) { + converter := NewPrometheusConverter() + payload := createExportRequest(5, 128, 128, 2, 0) + + annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}) + require.NoError(t, err) + require.Empty(t, annots) + }) + + t.Run("context cancellation", func(t *testing.T) { + converter := NewPrometheusConverter() + ctx, cancel := context.WithCancel(context.Background()) + // Verify that converter.FromMetrics respects cancellation. + cancel() + payload := createExportRequest(5, 128, 128, 2, 0) + + annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{}) + require.ErrorIs(t, err, context.Canceled) + require.Empty(t, annots) + }) + + t.Run("context timeout", func(t *testing.T) { + converter := NewPrometheusConverter() + // Verify that converter.FromMetrics respects timeout. 
+ ctx, cancel := context.WithTimeout(context.Background(), 0) + t.Cleanup(cancel) + payload := createExportRequest(5, 128, 128, 2, 0) + + annots, err := converter.FromMetrics(ctx, payload.Metrics(), Settings{}) + require.ErrorIs(t, err, context.DeadlineExceeded) + require.Empty(t, annots) + }) + t.Run("exponential histogram warnings for zero count and non-zero sum", func(t *testing.T) { request := pmetricotlp.NewExportRequest() rm := request.Metrics().ResourceMetrics().AppendEmpty() @@ -51,7 +85,7 @@ func TestFromMetrics(t *testing.T) { } converter := NewPrometheusConverter() - annots, err := converter.FromMetrics(request.Metrics(), Settings{}) + annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{}) require.NoError(t, err) require.NotEmpty(t, annots) ws, infos := annots.AsStrings("", 0, 0) @@ -84,7 +118,7 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { for i := 0; i < b.N; i++ { converter := NewPrometheusConverter() - annots, err := converter.FromMetrics(payload.Metrics(), Settings{}) + annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}) require.NoError(b, err) require.Empty(b, annots) require.NotNil(b, converter.TimeSeries()) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 80ccb46c75..6cdab450e1 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "math" "github.com/prometheus/common/model" @@ -27,9 +28,13 @@ import ( "github.com/prometheus/prometheus/prompb" ) -func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, name string) { +func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx 
context.Context, dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, settings Settings, name string) error { for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return err + } + pt := dataPoints.At(x) labels := createAttributes( resource, @@ -55,11 +60,17 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.Number } c.addSample(sample, labels) } + + return nil } -func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) { +func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) error { for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return err + } + pt := dataPoints.At(x) lbls := createAttributes( resource, @@ -85,7 +96,10 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa } ts := c.addSample(sample, lbls) if ts != nil { - exemplars := getPromExemplars[pmetric.NumberDataPoint](pt) + exemplars, err := getPromExemplars[pmetric.NumberDataPoint](ctx, &c.everyN, pt) + if err != nil { + return err + } ts.Exemplars = append(ts.Exemplars, exemplars...) 
} @@ -93,7 +107,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() { startTimestamp := pt.StartTimestamp() if startTimestamp == 0 { - return + return nil } createdLabels := make([]prompb.Label, len(lbls)) @@ -107,4 +121,6 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) } } + + return nil } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index 41afc8c4c3..e932269644 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -17,6 +17,7 @@ package prometheusremotewrite import ( + "context" "testing" "time" @@ -66,6 +67,7 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { converter := NewPrometheusConverter() converter.addGaugeNumberDataPoints( + context.Background(), metric.Gauge().DataPoints(), pcommon.NewResource(), Settings{ @@ -242,6 +244,7 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { converter := NewPrometheusConverter() converter.addSumNumberDataPoints( + context.Background(), metric.Sum().DataPoints(), pcommon.NewResource(), metric, diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 58fb668cc1..736bc8eff3 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -512,7 +512,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { otlpCfg := h.configFunc().OTLPConfig converter := otlptranslator.NewPrometheusConverter() - annots, err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{ + annots, err := converter.FromMetrics(r.Context(), req.Metrics(), 
otlptranslator.Settings{ AddMetricSuffixes: true, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, }) From 09c1e0b1405ee74d3ee9b922738c7ff35704a01c Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 20:04:46 +0200 Subject: [PATCH 0488/1397] Limit memory usage Go tests with race detector (#14862) * Limit memory usage Go tests with race detector Preliminary fix for https://github.com/prometheus/prometheus/issues/14858 Signed-off-by: Julius Volz * Use CI job env var instead for Go memory limits Signed-off-by: Julius Volz --------- Signed-off-by: Julius Volz --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98d3d9a754..3a181bb7cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,6 +12,10 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. image: quay.io/prometheus/golang-builder:1.23-base + env: + # Preliminary fix to make Go tests with race detector not use too much memory, + # see https://github.com/prometheus/prometheus/issues/14858. 
+ GOMEMLIMIT: 10GiB steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 From cdcd43af5b0b280172002f964f023555aa190211 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 20:53:44 +0200 Subject: [PATCH 0489/1397] Fix a lot of styling in tree view and binop explain view Signed-off-by: Julius Volz --- .../ExplainViews/BinaryExpr/VectorVector.tsx | 71 +++++++++---------- .../src/pages/query/TreeNode.module.css | 4 +- 2 files changed, 34 insertions(+), 41 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx index 1ba2d0eb56..1ba42310ef 100644 --- a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx @@ -14,7 +14,6 @@ import { Alert, Anchor, Box, - Button, Group, List, Switch, @@ -393,7 +392,9 @@ const VectorVectorBinaryExprExplainView: FC< > Too many match groups to display, only showing{" "} {Object.keys(matchGroups).length} out of {numGroups} groups. - setMaxGroups(undefined)}> +
    +
    + setMaxGroups(undefined)}> Show all groups
    @@ -423,20 +424,6 @@ const VectorVectorBinaryExprExplainView: FC< error, } = mg; - const noLHSMatches = lhs.length === 0; - const noRHSMatches = rhs.length === 0; - - const groupColor = colorPool[mgIdx % colorPool.length]; - const noMatchesColor = "#e0e0e0"; - const lhsGroupColor = noLHSMatches - ? noMatchesColor - : groupColor; - const rhsGroupColor = noRHSMatches - ? noMatchesColor - : groupColor; - const resultGroupColor = - noLHSMatches || noRHSMatches ? noMatchesColor : groupColor; - const matchGroupTitleRow = (color: string) => ( (
    {series.length === 0 ? ( @@ -510,22 +503,15 @@ const VectorVectorBinaryExprExplainView: FC< )} {seriesCount > series.length && ( - + {seriesCount - series.length} more series omitted - – - + Show all series + )} @@ -534,11 +520,16 @@ const VectorVectorBinaryExprExplainView: FC< ); - const lhsTable = matchGroupTable(lhs, lhsCount, lhsGroupColor); + const noLHSMatches = lhs.length === 0; + const noRHSMatches = rhs.length === 0; + + const groupColor = colorPool[mgIdx % colorPool.length]; + + const lhsTable = matchGroupTable(lhs, lhsCount, groupColor); const rhsTable = matchGroupTable( rhs, rhsCount, - rhsGroupColor, + groupColor, rhsColorOffset ); @@ -546,7 +537,11 @@ const VectorVectorBinaryExprExplainView: FC<
    @@ -554,9 +549,8 @@ const VectorVectorBinaryExprExplainView: FC< {noLHSMatches || noRHSMatches ? ( @@ -566,9 +560,8 @@ const VectorVectorBinaryExprExplainView: FC< ) : error !== null ? ( diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.module.css b/web/ui/mantine-ui/src/pages/query/TreeNode.module.css index da3b8436b4..80845d921f 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.module.css +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.module.css @@ -10,8 +10,8 @@ .nodeText.nodeTextSelected, .nodeText.nodeTextSelected:hover { background-color: light-dark( - var(--mantine-color-gray-3), - var(--mantine-color-dark-3) + var(--mantine-color-gray-4), + var(--mantine-color-gray-7) ); border: 2px solid light-dark(var(--mantine-color-gray-5), var(--mantine-color-dark-2)); From 1f1ca37fd761b3fdd6d9b22650d5cc4dbb77973a Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 20:53:59 +0200 Subject: [PATCH 0490/1397] Select root of tree by default in tree view Signed-off-by: Julius Volz --- .../mantine-ui/src/pages/query/TreeNode.tsx | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index 8ac176e58b..95d51f0de3 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -92,6 +92,13 @@ const TreeNode: FC<{ sortedLabelCards: [], }); + // Select the node when it is mounted and it is the root of the tree. + useEffect(() => { + if (parentRef === undefined) { + setSelectedNode({ id: nodeID, node: node }); + } + }, [parentRef, setSelectedNode, nodeID, node]); + // Deselect node when node is unmounted. useEffect(() => { return () => { @@ -400,26 +407,26 @@ const TreeNode: FC<{ ); - } else { - return ( -
    - {innerNode} - {children.map((child, idx) => ( - - - - ))} -
    - ); } + + return ( +
    + {innerNode} + {children.map((child, idx) => ( + + + + ))} +
    + ); }; export default TreeNode; From d23872ef30206654076bd4269b32c9b39ba915a6 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 8 Sep 2024 21:11:06 +0200 Subject: [PATCH 0491/1397] Graph range vector selectors as instant vector selectors with notice Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/Graph.tsx | 84 +++++++++++++------ .../mantine-ui/src/pages/query/QueryPanel.tsx | 1 + 2 files changed, 59 insertions(+), 26 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/Graph.tsx b/web/ui/mantine-ui/src/pages/query/Graph.tsx index 3fc9a6c510..d037546cfd 100644 --- a/web/ui/mantine-ui/src/pages/query/Graph.tsx +++ b/web/ui/mantine-ui/src/pages/query/Graph.tsx @@ -1,5 +1,5 @@ import { FC, useEffect, useId, useState } from "react"; -import { Alert, Skeleton, Box, LoadingOverlay } from "@mantine/core"; +import { Alert, Skeleton, Box, LoadingOverlay, Stack } from "@mantine/core"; import { IconAlertTriangle, IconInfoCircle } from "@tabler/icons-react"; import { RangeQueryResult } from "../../api/responseTypes/query"; import { SuccessAPIResponse, useAPIQuery } from "../../api/api"; @@ -13,9 +13,12 @@ import "uplot/dist/uPlot.min.css"; import "./uplot.css"; import { useElementSize } from "@mantine/hooks"; import UPlotChart, { UPlotChartRange } from "./UPlotChart"; +import ASTNode, { nodeType } from "../../promql/ast"; +import serializeNode from "../../promql/serialize"; export interface GraphProps { expr: string; + node: ASTNode | null; endTime: number | null; range: number; resolution: GraphResolution; @@ -27,6 +30,7 @@ export interface GraphProps { const Graph: FC = ({ expr, + node, endTime, range, resolution, @@ -38,6 +42,22 @@ const Graph: FC = ({ const { ref, width } = useElementSize(); const [rerender, setRerender] = useState(true); + const effectiveExpr = + node === null + ? expr + : serializeNode( + node.type === nodeType.matrixSelector + ? 
{ + type: nodeType.vectorSelector, + name: node.name, + matchers: node.matchers, + offset: node.offset, + timestamp: node.timestamp, + startOrEnd: node.startOrEnd, + } + : node + ); + const effectiveEndTime = (endTime !== null ? endTime : Date.now()) / 1000; const startTime = effectiveEndTime - range / 1000; const effectiveResolution = getEffectiveResolution(resolution, range) / 1000; @@ -47,12 +67,12 @@ const Graph: FC = ({ key: [useId()], path: "/query_range", params: { - query: expr, + query: effectiveExpr, step: effectiveResolution.toString(), start: startTime.toString(), end: effectiveEndTime.toString(), }, - enabled: expr !== "", + enabled: effectiveExpr !== "", }); // Bundle the chart data and the displayed range together. This has two purposes: @@ -82,8 +102,8 @@ const Graph: FC = ({ // Re-execute the query when the user presses Enter (or hits the Execute button). useEffect(() => { - expr !== "" && refetch(); - }, [retriggerIdx, refetch, expr, endTime, range, resolution]); + effectiveExpr !== "" && refetch(); + }, [retriggerIdx, refetch, effectiveExpr, endTime, range, resolution]); // The useElementSize hook above only gets a valid size on the second render, so this // is a workaround to make the component render twice after mount. @@ -133,27 +153,39 @@ const Graph: FC = ({ } return ( - - , - // }} - // styles={{ loader: { width: "100%", height: "100%" } }} - /> - - + + {node !== null && node.type === nodeType.matrixSelector && ( + } + > + Note: Range vector selectors can't be graphed, so + graphing the equivalent instant vector selector instead. 
+ + )} + + , + // }} + // styles={{ loader: { width: "100%", height: "100%" } }} + /> + + + ); }; diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx index 09deb95e38..a172311828 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx @@ -296,6 +296,7 @@ const QueryPanel: FC = ({ idx, metricNames }) => { Date: Sun, 8 Sep 2024 21:12:11 +0200 Subject: [PATCH 0492/1397] Git-ignore correct path for old web UI build Signed-off-by: Julius Volz --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 63c65b8e61..f4ed7242ba 100644 --- a/.gitignore +++ b/.gitignore @@ -22,7 +22,7 @@ benchmark.txt /documentation/examples/remote_storage/example_write_adapter/example_write_adapter npm_licenses.tar.bz2 -/web/ui/static/react +/web/ui/static/react-app /web/ui/static/mantine-ui /vendor From 98528550842e38be419c2d8fbbc437a9da2b39bf Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 16:20:47 +1000 Subject: [PATCH 0493/1397] Implement unary negation for native histograms Signed-off-by: Charles Korn --- promql/engine.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index b54ce2d6dc..1fa51ef481 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1799,6 +1799,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio for j := range mat[i].Floats { mat[i].Floats[j].F = -mat[i].Floats[j].F } + for j := range mat[i].Histograms { + mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1) + } } if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") From 2bdb3452d1181e40283c2ae141adf4f0e952d48a Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 9 Sep 2024 14:29:29 +1000 Subject: [PATCH 0494/1397] Modify parser for native histograms to allow 
negative values where supported Signed-off-by: Charles Korn --- promql/parser/generated_parser.y | 8 +- promql/parser/generated_parser.y.go | 156 ++++++++++++++-------------- promql/parser/lex.go | 3 + promql/parser/parse_test.go | 24 ++++- 4 files changed, 105 insertions(+), 86 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index da24be0c44..befb9bdf3e 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -818,12 +818,12 @@ histogram_desc_item $$ = yylex.(*parser).newMap() $$["sum"] = $3 } - | COUNT_DESC COLON number + | COUNT_DESC COLON signed_or_unsigned_number { $$ = yylex.(*parser).newMap() $$["count"] = $3 } - | ZERO_BUCKET_DESC COLON number + | ZERO_BUCKET_DESC COLON signed_or_unsigned_number { $$ = yylex.(*parser).newMap() $$["z_bucket"] = $3 @@ -875,11 +875,11 @@ bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET } ; -bucket_set_list : bucket_set_list SPACE number +bucket_set_list : bucket_set_list SPACE signed_or_unsigned_number { $$ = append($1, $3) } - | number + | signed_or_unsigned_number { $$ = []float64{$1} } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 22231f73e2..ad58a52976 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -410,55 +410,55 @@ const yyPrivate = 57344 const yyLast = 799 var yyAct = [...]int16{ - 155, 334, 332, 276, 339, 152, 226, 39, 192, 44, - 291, 290, 156, 118, 82, 178, 229, 107, 106, 346, - 347, 348, 349, 109, 108, 198, 239, 199, 133, 110, - 105, 60, 245, 121, 6, 329, 325, 111, 328, 228, - 200, 201, 160, 119, 304, 267, 293, 128, 260, 160, - 151, 261, 159, 302, 358, 311, 122, 55, 89, 159, - 196, 241, 242, 259, 113, 243, 114, 54, 98, 99, - 302, 112, 101, 256, 104, 88, 230, 232, 234, 235, + 152, 334, 332, 155, 339, 226, 39, 192, 276, 44, + 291, 290, 118, 82, 178, 229, 107, 106, 346, 347, + 348, 349, 109, 108, 198, 239, 199, 156, 110, 105, + 6, 245, 
200, 201, 133, 325, 111, 329, 228, 60, + 357, 293, 328, 304, 267, 160, 266, 128, 55, 151, + 302, 311, 302, 196, 340, 159, 55, 89, 54, 356, + 241, 242, 355, 113, 243, 114, 54, 98, 99, 265, + 112, 101, 256, 104, 88, 230, 232, 234, 235, 236, + 244, 246, 249, 250, 251, 252, 253, 257, 258, 105, + 333, 231, 233, 237, 238, 240, 247, 248, 103, 115, + 109, 254, 255, 324, 150, 218, 110, 264, 111, 270, + 77, 35, 7, 149, 188, 163, 322, 321, 173, 320, + 167, 170, 323, 165, 271, 166, 2, 3, 4, 5, + 263, 101, 194, 104, 180, 184, 197, 187, 186, 319, + 272, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 195, 299, 103, 318, + 217, 36, 298, 1, 190, 219, 220, 317, 160, 160, + 316, 193, 160, 154, 182, 196, 229, 297, 159, 159, + 160, 358, 159, 268, 181, 183, 239, 260, 296, 262, + 159, 315, 245, 129, 314, 55, 225, 313, 161, 228, + 161, 161, 259, 312, 161, 54, 86, 295, 310, 288, + 289, 8, 161, 292, 162, 37, 162, 162, 49, 269, + 162, 241, 242, 309, 179, 243, 180, 127, 162, 126, + 308, 223, 294, 256, 48, 222, 230, 232, 234, 235, 236, 244, 246, 249, 250, 251, 252, 253, 257, 258, - 160, 115, 231, 233, 237, 238, 240, 247, 248, 103, - 159, 109, 254, 255, 324, 150, 357, 110, 333, 218, - 111, 340, 310, 149, 77, 163, 7, 105, 35, 173, - 167, 170, 161, 323, 165, 356, 166, 309, 355, 194, - 2, 3, 4, 5, 308, 322, 184, 197, 162, 186, - 321, 195, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 229, 129, 101, - 217, 104, 219, 220, 190, 266, 270, 239, 160, 121, - 268, 193, 264, 245, 55, 196, 154, 225, 159, 119, - 228, 271, 188, 160, 54, 161, 103, 117, 265, 84, - 262, 299, 122, 159, 320, 263, 298, 272, 10, 83, - 161, 162, 241, 242, 269, 187, 243, 185, 79, 288, - 289, 297, 319, 292, 256, 161, 162, 230, 232, 234, - 235, 236, 244, 246, 249, 250, 251, 252, 253, 257, - 258, 162, 294, 231, 233, 237, 238, 240, 247, 248, - 318, 317, 316, 254, 255, 180, 315, 134, 135, 136, - 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, - 147, 148, 157, 158, 169, 105, 
314, 296, 300, 301, - 303, 223, 305, 313, 55, 222, 179, 168, 180, 84, - 306, 307, 177, 125, 54, 182, 295, 176, 124, 83, - 221, 312, 87, 89, 8, 181, 183, 81, 37, 86, - 175, 123, 36, 98, 99, 326, 327, 101, 102, 104, - 88, 127, 331, 126, 50, 336, 337, 338, 182, 335, - 78, 1, 342, 341, 344, 343, 49, 48, 181, 183, - 350, 351, 47, 55, 103, 352, 53, 77, 164, 56, - 46, 354, 22, 54, 59, 55, 172, 9, 9, 57, - 132, 45, 43, 130, 171, 54, 359, 42, 131, 41, - 40, 51, 191, 353, 273, 75, 85, 189, 224, 80, - 345, 18, 19, 120, 153, 20, 58, 227, 52, 116, + 221, 169, 231, 233, 237, 238, 240, 247, 248, 157, + 158, 164, 254, 255, 168, 10, 182, 300, 55, 301, + 303, 47, 305, 46, 132, 79, 181, 183, 54, 306, + 307, 45, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 43, 59, 50, + 84, 9, 9, 121, 326, 78, 327, 130, 171, 121, + 83, 42, 131, 119, 335, 336, 337, 331, 185, 119, + 338, 261, 342, 341, 344, 343, 122, 117, 41, 177, + 350, 351, 122, 55, 176, 352, 53, 77, 40, 56, + 125, 354, 22, 54, 84, 124, 172, 175, 51, 57, + 191, 353, 273, 85, 83, 189, 359, 224, 123, 80, + 345, 120, 81, 153, 58, 75, 227, 52, 116, 0, + 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 13, 0, 0, 0, 24, 0, 30, - 0, 0, 31, 32, 55, 38, 0, 53, 77, 0, + 0, 0, 31, 32, 55, 38, 105, 53, 77, 0, 56, 275, 0, 22, 54, 0, 0, 0, 274, 0, 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, - 281, 282, 287, 0, 0, 0, 75, 0, 0, 0, - 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, - 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, + 281, 282, 287, 87, 89, 0, 75, 0, 0, 0, + 0, 0, 18, 19, 98, 99, 20, 0, 101, 102, + 104, 88, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 0, 0, 0, 13, 0, 0, 0, 24, 0, + 74, 0, 0, 0, 13, 103, 0, 0, 24, 0, 30, 0, 55, 31, 32, 53, 77, 0, 56, 330, 0, 22, 54, 0, 0, 0, 0, 0, 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, 281, 282, @@ -493,51 +493,51 @@ var yyAct = [...]int16{ } var yyPact = [...]int16{ - 32, 106, 569, 569, 405, 
526, -1000, -1000, -1000, 105, + 28, 102, 569, 569, 405, 526, -1000, -1000, -1000, 98, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 277, -1000, 297, -1000, 650, + -1000, -1000, -1000, -1000, -1000, 342, -1000, 204, -1000, 650, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 22, 95, -1000, -1000, 483, -1000, 483, 101, + -1000, -1000, 21, 93, -1000, -1000, 483, -1000, 483, 97, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 167, -1000, -1000, - 281, -1000, -1000, 309, -1000, 23, -1000, -50, -50, -50, - -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, - -50, -50, -50, 48, 174, 336, 95, -56, -1000, 262, - 262, 324, -1000, 631, 103, -1000, 280, -1000, -1000, 274, - 241, -1000, -1000, -1000, 187, -1000, 180, -1000, 159, 483, - -1000, -57, -40, -1000, 483, 483, 483, 483, 483, 483, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 307, -1000, -1000, + 338, -1000, -1000, 225, -1000, 23, -1000, -44, -44, -44, + -44, -44, -44, -44, -44, -44, -44, -44, -44, -44, + -44, -44, -44, 47, 171, 259, 93, -57, -1000, 249, + 249, 324, -1000, 631, 75, -1000, 327, -1000, -1000, 222, + 130, -1000, -1000, -1000, 298, -1000, 112, -1000, 159, 483, + -1000, -58, -48, -1000, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, 483, -1000, - 165, -1000, -1000, 94, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 40, 40, 269, -1000, -1000, -1000, -1000, 155, -1000, - -1000, 41, -1000, 650, -1000, -1000, 31, -1000, 170, -1000, - -1000, -1000, -1000, -1000, 163, -1000, -1000, -1000, -1000, -1000, - 19, 144, 140, -1000, -1000, -1000, 404, 16, 262, 262, - 262, 262, 103, 103, 251, 251, 251, 715, 696, 251, - 251, 715, 103, 103, 251, 103, 16, -1000, 24, -1000, - -1000, -1000, 265, -1000, 189, -1000, -1000, -1000, -1000, -1000, + 39, -1000, 
-1000, 90, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 36, 36, 229, -1000, -1000, -1000, -1000, 174, -1000, + -1000, 180, -1000, 650, -1000, -1000, 301, -1000, 105, -1000, + -1000, -1000, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, + 18, 157, 83, -1000, -1000, -1000, 404, 15, 249, 249, + 249, 249, 75, 75, 402, 402, 402, 715, 696, 402, + 402, 715, 75, 75, 402, 75, 15, -1000, 19, -1000, + -1000, -1000, 186, -1000, 155, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 483, -1000, -1000, -1000, -1000, -1000, -1000, 34, 34, 18, - 34, 44, 44, 110, 38, -1000, -1000, 285, 267, 260, - 240, 236, 235, 234, 206, 188, 134, 129, -1000, -1000, - -1000, -1000, -1000, -1000, 102, -1000, -1000, -1000, 14, -1000, - 650, -1000, -1000, -1000, 34, -1000, 12, 9, 482, -1000, - -1000, -1000, 51, 81, 40, 40, 40, 97, 97, 51, - 97, 51, -73, -1000, -1000, -1000, -1000, -1000, 34, 34, - -1000, -1000, -1000, 34, -1000, -1000, -1000, -1000, -1000, -1000, - 40, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 104, -1000, 33, -1000, -1000, -1000, -1000, + 483, -1000, -1000, -1000, -1000, -1000, -1000, 31, 31, 17, + 31, 37, 37, 206, 34, -1000, -1000, 197, 191, 188, + 185, 164, 161, 153, 133, 113, 111, 110, -1000, -1000, + -1000, -1000, -1000, -1000, 101, -1000, -1000, -1000, 13, -1000, + 650, -1000, -1000, -1000, 31, -1000, 16, 11, 482, -1000, + -1000, -1000, 33, 163, 163, 163, 36, 40, 40, 33, + 40, 33, -74, -1000, -1000, -1000, -1000, -1000, 31, 31, + -1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, -1000, -1000, + 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 38, -1000, 160, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 379, 13, 378, 6, 15, 377, 344, 376, 374, - 373, 370, 198, 294, 369, 14, 
368, 10, 11, 367, - 366, 8, 364, 3, 4, 363, 2, 1, 0, 362, - 12, 5, 361, 360, 18, 158, 359, 358, 7, 357, - 354, 17, 353, 31, 352, 9, 351, 350, 340, 332, - 327, 326, 314, 321, 302, + 0, 368, 12, 367, 5, 14, 366, 298, 364, 363, + 361, 360, 265, 211, 359, 13, 357, 10, 11, 355, + 353, 7, 352, 8, 4, 351, 2, 1, 3, 350, + 27, 0, 348, 338, 17, 193, 328, 312, 6, 311, + 308, 16, 307, 39, 297, 9, 281, 274, 273, 271, + 234, 218, 299, 163, 161, } var yyR1 = [...]int8{ @@ -630,9 +630,9 @@ var yyChk = [...]int16{ -38, -27, 19, -27, 26, -27, -21, -21, 24, 17, 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 21, 2, 22, -4, -27, 26, 26, - 17, -23, -26, 57, -27, -31, -28, -28, -28, -24, + 17, -23, -26, 57, -27, -31, -31, -31, -28, -24, 14, -24, -26, -24, -26, -11, 92, 93, 94, 95, - -27, -27, -27, -25, -28, 24, 21, 2, 21, -28, + -27, -27, -27, -25, -31, 24, 21, 2, 21, -31, } var yyDef = [...]int16{ diff --git a/promql/parser/lex.go b/promql/parser/lex.go index d031e83307..82bf0367b8 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -610,6 +610,9 @@ func lexBuckets(l *Lexer) stateFn { case isSpace(r): l.emit(SPACE) return lexSpace + case r == '-': + l.emit(SUB) + return lexNumber case isDigit(r): l.backup() return lexNumber diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 37748323ce..d9956e7452 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -4084,17 +4084,17 @@ func TestParseHistogramSeries(t *testing.T) { }, { name: "all properties used", - input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`, + input: `{} {{schema:1 sum:0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:3 n_buckets:[4.1 5] n_offset:5 counter_reset_hint:gauge}}`, expected: []histogram.FloatHistogram{{ Schema: 1, - Sum: -0.3, + Sum: 0.3, Count: 3.1, ZeroCount: 7.1, ZeroThreshold: 0.05, PositiveBuckets: []float64{5.1, 10, 7}, - 
PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + PositiveSpans: []histogram.Span{{Offset: 3, Length: 3}}, NegativeBuckets: []float64{4.1, 5}, - NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + NegativeSpans: []histogram.Span{{Offset: 5, Length: 2}}, CounterResetHint: histogram.GaugeType, }}, }, @@ -4114,6 +4114,22 @@ func TestParseHistogramSeries(t *testing.T) { CounterResetHint: histogram.GaugeType, }}, }, + { + name: "all properties used, with negative values where supported", + input: `{} {{schema:1 sum:-0.3 count:-3.1 z_bucket:-7.1 z_bucket_w:0.05 buckets:[-5.1 -10 -7] offset:-3 n_buckets:[-4.1 -5] n_offset:-5 counter_reset_hint:gauge}}`, + expected: []histogram.FloatHistogram{{ + Schema: 1, + Sum: -0.3, + Count: -3.1, + ZeroCount: -7.1, + ZeroThreshold: 0.05, + PositiveBuckets: []float64{-5.1, -10, -7}, + PositiveSpans: []histogram.Span{{Offset: -3, Length: 3}}, + NegativeBuckets: []float64{-4.1, -5}, + NegativeSpans: []histogram.Span{{Offset: -5, Length: 2}}, + CounterResetHint: histogram.GaugeType, + }}, + }, { name: "static series", input: `{} {{buckets:[5 10 7] schema:1}}x2`, From e8c74821375131e33dfa9c2d14ddc896fc776b5d Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 4 Sep 2024 16:21:17 +1000 Subject: [PATCH 0495/1397] Return negative counts when multiplied or divided by a negative value Signed-off-by: Charles Korn --- model/histogram/float_histogram_test.go | 96 +++++++++++++++++++ .../testdata/native_histograms.test | 8 +- 2 files changed, 100 insertions(+), 4 deletions(-) diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 1558a6d679..cf370a313e 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -131,6 +131,54 @@ func TestFloatHistogramMul(t *testing.T) { NegativeBuckets: []float64{9, 3, 15, 18}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: 
[]Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -11, + Count: -30, + Sum: -23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-1, 0, -3, -4, -7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3, -1, -5, -6}, + }, + }, + { + "negative multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -22, + Count: -60, + Sum: -46, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-2, 0, -6, -8, -14}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-6, -2, -10, -12}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ @@ -409,6 +457,54 @@ func TestFloatHistogramDiv(t *testing.T) { NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, }, }, + { + "negation", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + -1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -3493.3, + Sum: -2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{-1, -3.3, -4.2, -0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-3.1, -3, -1.234e5, -1000}, + }, + }, + { + "negative half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: 
[]Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + -2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: -5.5, + Count: -15, + Sum: -11.5, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{-0.5, 0, -1.5, -2, -3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{-1.5, -0.5, -2.5, -3}, + }, + }, { "no-op with custom buckets", &FloatHistogram{ diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index ee2ae7759c..7d2eec32cf 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -728,13 +728,13 @@ eval instant at 10m histogram_mul_div*3 {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} eval instant at 10m histogram_mul_div*-1 - {} {{schema:0 count:30 sum:-33 z_bucket:3 z_bucket_w:0.001 buckets:[6 6 6] n_buckets:[3 3 3]}} + {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} eval instant at 10m -histogram_mul_div - {} {{schema:0 count:30 sum:-33 z_bucket:3 z_bucket_w:0.001 buckets:[6 6 6] n_buckets:[3 3 3]}} + {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}} eval instant at 10m histogram_mul_div*-3 - {} {{schema:0 count:90 sum:-99 z_bucket:9 z_bucket_w:0.001 buckets:[18 18 18] n_buckets:[9 9 9]}} + {} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] n_buckets:[-18 -18 -18]}} eval instant at 10m 3*histogram_mul_div {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}} @@ -749,7 +749,7 @@ eval instant at 10m histogram_mul_div/3 {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} eval instant at 10m histogram_mul_div/-3 - {} {{schema:0 count:10 sum:-11 z_bucket:1 z_bucket_w:0.001 buckets:[2 2 2] n_buckets:[1 1 1]}} + 
{} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}} eval instant at 10m histogram_mul_div/float_series_3 {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}} From f1e0939f2f0e0658232313b987a5f0570cd93e2d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 10:27:24 +0200 Subject: [PATCH 0496/1397] Document /api/v1/parse_expr endpoint Signed-off-by: Julius Volz --- docs/querying/api.md | 71 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index efa244fbc8..e32c8ecaf5 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -239,6 +239,75 @@ $ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar' } ``` +## Parsing a PromQL expressions into a abstract syntax tree (AST) + +This endpoint is **experimental** and might change in the future. It is currently only meant to be used by Prometheus' own web UI, and the endpoint name and exact format returned may change from one Prometheus version to another. It may also be removed again in case it is no longer needed by the UI. + +The following endpoint parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation: + +``` +GET /api/v1/parse_query +POST /api/v1/parse_query +``` + +URL query parameters: + +- `query=`: Prometheus expression query string. + +You can URL-encode these parameters directly in the request body by using the `POST` method and +`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large +query that may breach server-side URL character limits. + +The `data` section of the query result is a string containing the AST of the parsed query expression. 
+ +The following example parses the expression `foo/bar`: + +```json +$ curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar' +{ + "data" : { + "bool" : false, + "lhs" : { + "matchers" : [ + { + "name" : "__name__", + "type" : "=", + "value" : "foo" + } + ], + "name" : "foo", + "offset" : 0, + "startOrEnd" : null, + "timestamp" : null, + "type" : "vectorSelector" + }, + "matching" : { + "card" : "one-to-one", + "include" : [], + "labels" : [], + "on" : false + }, + "op" : "/", + "rhs" : { + "matchers" : [ + { + "name" : "__name__", + "type" : "=", + "value" : "bar" + } + ], + "name" : "bar", + "offset" : 0, + "startOrEnd" : null, + "timestamp" : null, + "type" : "vectorSelector" + }, + "type" : "binaryExpr" + }, + "status" : "success" +} +``` + ## Querying metadata Prometheus offers a set of API endpoints to query metadata about series and their labels. @@ -693,7 +762,7 @@ URL query parameters: - `rule_name[]=`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done. - `rule_group[]=`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done. - `file[]=`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. -- `exclude_alerts=`: only return rules, do not return active alerts. +- `exclude_alerts=`: only return rules, do not return active alerts. - `match[]=`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. 
Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional. ```json From 0c9a2c95bfb2b71d6d9f6610c66846b9e460c7cf Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:02:15 +0200 Subject: [PATCH 0497/1397] Make "npm run start" from web/ui start the new UI Signed-off-by: Julius Volz --- web/ui/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/package.json b/web/ui/package.json index f1cd0b90de..9a39069dda 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -6,7 +6,7 @@ "scripts": { "build": "bash build_ui.sh --all", "build:module": "bash build_ui.sh --build-module", - "start": "npm run start -w app", + "start": "npm run start -w mantine-ui", "test": "npm run test --workspaces", "lint": "npm run lint --workspaces" }, From a0463f550138c9b08c4f7852c6f5a17fd889b9ce Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:02:32 +0200 Subject: [PATCH 0498/1397] Update UI docs to mention both old + new UIs Signed-off-by: Julius Volz --- web/ui/README.md | 49 ++++++++++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/web/ui/README.md b/web/ui/README.md index 465adf5372..38087755e5 100644 --- a/web/ui/README.md +++ b/web/ui/README.md @@ -1,4 +1,5 @@ ## Overview + The `ui` directory contains static files and templates used in the web UI. For easier distribution they are compressed (c.f. Makefile) and statically compiled into the Prometheus binary using the embed package. @@ -15,15 +16,23 @@ This will serve all files from your local filesystem. This is for development pu ### Introduction -The react application is a monorepo composed by multiple different npm packages. The main one is `react-app` which -contains the code of the react application. 
+This directory contains two generations of Prometheus' React-based web UI: + +* `react-app`: The old 2.x web UI +* `mantine-ui`: The new 3.x web UI + +Both UIs are built and compiled into Prometheus. The new UI is served by default, but a feature flag +(`--enable-feature=old-ui`) can be used to switch back to serving the old UI. Then you have different npm packages located in the folder `modules`. These packages are supposed to be used by the -react-app and also by others consumers (like Thanos) +two React apps and also by others consumers (like Thanos). + +While most of these applications / modules are part of the same npm workspace, the old UI in the `react-app` directory +has been separated out of the workspace setup, since its dependencies were too incompatible. ### Pre-requisite -To be able to build the react application you need: +To be able to build either of the React applications, you need: * npm >= v7 * node >= v20 @@ -38,46 +47,50 @@ need to move to the directory `web/ui` and then download and install them locall npm consults the `package.json` and `package-lock.json` files for dependencies to install. It creates a `node_modules` directory with all installed dependencies. -**NOTE**: Do not run `npm install` in the `react-app` folder or in any sub folder of the `module` directory. +**NOTE**: Do not run `npm install` in the `react-app` / `mantine-ui` folder or in any sub folder of the `module` directory. ### Upgrading npm dependencies -As it is a monorepo, when upgrading a dependency, you have to upgrade it in every packages that composed this monorepo ( -aka, in all sub folder of `module` and in `react-app`) +As it is a monorepo, when upgrading a dependency, you have to upgrade it in every packages that composed this monorepo +(aka, in all sub folders of `module` and `react-app` / `mantine-ui`) Then you have to run the command `npm install` in `web/ui` and not in a sub folder / sub package. It won't simply work. 
### Running a local development server -You can start a development server for the React UI outside of a running Prometheus server by running: +You can start a development server for the new React UI outside of a running Prometheus server by running: npm start -This will open a browser window with the React app running on http://localhost:3000/. The page will reload if you make +(For the old UI, you will have to run the same command from the `react-app` subdirectory.) + +This will open a browser window with the React app running on http://localhost:5173/. The page will reload if you make edits to the source code. You will also see any lint errors in the console. -**NOTE**: It will reload only if you change the code in `react-app` folder. Any code changes in the folder `module` is +**NOTE**: It will reload only if you change the code in `mantine-ui` folder. Any code changes in the folder `module` is not considered by the command `npm start`. In order to see the changes in the react-app you will have to run `npm run build:module` -Due to a `"proxy": "http://localhost:9090"` setting in the `package.json` file, any API requests from the React UI are +Due to a `"proxy": "http://localhost:9090"` setting in the `mantine-ui/vite.config.ts` file, any API requests from the React UI are proxied to `localhost` on port `9090` by the development server. This allows you to run a normal Prometheus server to handle API requests, while iterating separately on the UI. 
- [browser] ----> [localhost:3000 (dev server)] --(proxy API requests)--> [localhost:9090 (Prometheus)] + [browser] ----> [localhost:5173 (dev server)] --(proxy API requests)--> [localhost:9090 (Prometheus)] ### Running tests -To run the test for the react-app and for all modules, you can simply run: +To run the test for the new React app and for all modules, you can simply run: ```bash npm test ``` -if you want to run the test only for a specific module, you need to go to the folder of the module and run +(For the old UI, you will have to run the same command from the `react-app` subdirectory.) + +If you want to run the test only for a specific module, you need to go to the folder of the module and run again `npm test`. -For example, in case you only want to run the test of the react-app, go to `web/ui/react-app` and run `npm test` +For example, in case you only want to run the test of the new React app, go to `web/ui/mantine-ui` and run `npm test` To generate an HTML-based test coverage report, run: @@ -93,7 +106,7 @@ running tests. ### Building the app for production -To build a production-optimized version of the React app to a `build` subdirectory, run: +To build a production-optimized version of both React app versions to a `static/{react-app,mantine-ui}` subdirectory, run: npm run build @@ -102,10 +115,10 @@ Prometheus `Makefile` when building the full binary. ### Integration into Prometheus -To build a Prometheus binary that includes a compiled-in version of the production build of the React app, change to the +To build a Prometheus binary that includes a compiled-in version of the production build of both React app versions, change to the root of the repository and run: make build -This installs dependencies via npm, builds a production build of the React app, and then finally compiles in all web +This installs dependencies via npm, builds a production build of both React apps, and then finally compiles in all web assets into the Prometheus binary. 
From ff41d45baefc48e264dcdc0d2aa0300a1a406de3 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:13:22 +0200 Subject: [PATCH 0499/1397] Make status page timestamps consistent and use local time Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/StatusPage.tsx | 56 ++++++++++++---------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/StatusPage.tsx b/web/ui/mantine-ui/src/pages/StatusPage.tsx index 15798756d5..04d215cf5f 100644 --- a/web/ui/mantine-ui/src/pages/StatusPage.tsx +++ b/web/ui/mantine-ui/src/pages/StatusPage.tsx @@ -2,32 +2,7 @@ import { Card, Group, Stack, Table, Text } from "@mantine/core"; import { useSuspenseAPIQuery } from "../api/api"; import { IconRun, IconWall } from "@tabler/icons-react"; import { formatTimestamp } from "../lib/formatTime"; - -const statusConfig: Record< - string, - { - title?: string; - formatValue?: (v: string | boolean) => string; - } -> = { - startTime: { - title: "Start time", - formatValue: (v: string | boolean) => - formatTimestamp(new Date(v as string).valueOf() / 1000, false), // TODO: Set useLocalTime parameter correctly. - }, - CWD: { title: "Working directory" }, - reloadConfigSuccess: { - title: "Configuration reload", - formatValue: (v: string | boolean) => (v ? 
"Successful" : "Unsuccessful"), - }, - lastConfigTime: { - title: "Last successful configuration reload", - formatValue: (v: string | boolean) => new Date(v as string).toUTCString(), - }, - corruptionCount: { title: "WAL corruptions" }, - goroutineCount: { title: "Goroutines" }, - storageRetention: { title: "Storage retention" }, -}; +import { useSettings } from "../state/settingsSlice"; export default function StatusPage() { const { data: buildinfo } = useSuspenseAPIQuery>({ @@ -37,6 +12,35 @@ export default function StatusPage() { path: `/status/runtimeinfo`, }); + const { useLocalTime } = useSettings(); + + const statusConfig: Record< + string, + { + title?: string; + formatValue?: (v: string | boolean) => string; + } + > = { + startTime: { + title: "Start time", + formatValue: (v: string | boolean) => + formatTimestamp(new Date(v as string).valueOf() / 1000, useLocalTime), + }, + CWD: { title: "Working directory" }, + reloadConfigSuccess: { + title: "Configuration reload", + formatValue: (v: string | boolean) => (v ? 
"Successful" : "Unsuccessful"), + }, + lastConfigTime: { + title: "Last successful configuration reload", + formatValue: (v: string | boolean) => + formatTimestamp(new Date(v as string).valueOf() / 1000, useLocalTime), + }, + corruptionCount: { title: "WAL corruptions" }, + goroutineCount: { title: "Goroutines" }, + storageRetention: { title: "Storage retention" }, + }; + return ( From d7bacf96c7aaad22ec65d21e8df7d9efc8fa1985 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:19:02 +0200 Subject: [PATCH 0500/1397] Remove commented-out and unused allowLineBreaks settings for binop explain Signed-off-by: Julius Volz --- .../pages/query/ExplainViews/BinaryExpr/VectorVector.tsx | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx index 1ba42310ef..6735d1320b 100644 --- a/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/BinaryExpr/VectorVector.tsx @@ -326,12 +326,8 @@ const explainError = ( const VectorVectorBinaryExprExplainView: FC< VectorVectorBinaryExprExplainViewProps > = ({ node, lhs, rhs }) => { - // TODO: Don't use Mantine's local storage as a one-off here. - // const [allowLineBreaks, setAllowLineBreaks] = useLocalStorage({ - // key: "queryPage.explain.binaryOperators.breakLongLines", - // defaultValue: true, - // }); - + // TODO: Don't use Mantine's local storage as a one-off here. Decide whether we + // want to keep Redux, and then do it only via one or the other everywhere. 
const [showSampleValues, setShowSampleValues] = useLocalStorage({ key: "queryPage.explain.binaryOperators.showSampleValues", defaultValue: false, From 5956d482e8f8f5979aaf372ccce793bc196a4e17 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:19:47 +0200 Subject: [PATCH 0501/1397] Remove a few unneeded comments and component props Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx | 2 +- web/ui/mantine-ui/src/pages/query/QueryPanel.tsx | 1 - web/ui/mantine-ui/src/pages/query/TreeView.tsx | 2 -- web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx | 1 - 4 files changed, 1 insertion(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx index d7dbf0dd69..dce05c1814 100644 --- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx +++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx @@ -206,7 +206,7 @@ const ExpressionInput: FC = ({ enableLinter, enableQueryHistory, queryHistory, - ]); // TODO: Make this depend on external settings changes, maybe use dynamic config compartment again. + ]); // TODO: Maybe use dynamic config compartment again as in the old UI? return ( diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx index a172311828..24eb8c59cb 100644 --- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx +++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx @@ -123,7 +123,6 @@ const QueryPanel: FC = ({ idx, metricNames }) => { > { diff --git a/web/ui/mantine-ui/src/pages/query/TreeView.tsx b/web/ui/mantine-ui/src/pages/query/TreeView.tsx index 1455e43774..3a631f1e32 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeView.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeView.tsx @@ -7,8 +7,6 @@ import { Card, CloseButton } from "@mantine/core"; const TreeView: FC<{ panelIdx: number; - // TODO: Do we need retriggerIdx for the tree view AST parsing? 
Maybe for children! - retriggerIdx: number; selectedNode: { id: string; node: ASTNode; diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index 290543fc5b..8790f45221 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -326,7 +326,6 @@ const ScrapePoolList: FC = ({ }} > - {/* TODO: Process target URL like in old UI */} Date: Mon, 9 Sep 2024 11:46:42 +0200 Subject: [PATCH 0502/1397] Show alert annotations on /rules page Signed-off-by: Julius Volz --- .../src/components/RuleDefinition.tsx | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/web/ui/mantine-ui/src/components/RuleDefinition.tsx b/web/ui/mantine-ui/src/components/RuleDefinition.tsx index 0865f58ad9..4e22dd9344 100644 --- a/web/ui/mantine-ui/src/components/RuleDefinition.tsx +++ b/web/ui/mantine-ui/src/components/RuleDefinition.tsx @@ -94,20 +94,20 @@ const RuleDefinition: FC<{ rule: Rule }> = ({ rule }) => { )} - {/* {Object.keys(r.annotations).length > 0 && ( - - {Object.entries(r.annotations).map(([k, v]) => ( - - {k}: {v} - - ))} - - )} */} + {rule.type === "alerting" && Object.keys(rule.annotations).length > 0 && ( +
    + + {Object.entries(rule.annotations).map(([k, v]) => ( + + + {k} + + {v} + + ))} + +
    + )} ); }; From 7e0cba568e8412c0b0a52868195b39299bd122c2 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:47:03 +0200 Subject: [PATCH 0503/1397] Make annotations display on /alerts page consistent with /rules Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/components/RuleDefinition.tsx | 1 + web/ui/mantine-ui/src/pages/AlertsPage.tsx | 12 ++++-------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/src/components/RuleDefinition.tsx b/web/ui/mantine-ui/src/components/RuleDefinition.tsx index 4e22dd9344..10944f6e79 100644 --- a/web/ui/mantine-ui/src/components/RuleDefinition.tsx +++ b/web/ui/mantine-ui/src/components/RuleDefinition.tsx @@ -5,6 +5,7 @@ import { Card, Group, rem, + Table, Tooltip, useComputedColorScheme, } from "@mantine/core"; diff --git a/web/ui/mantine-ui/src/pages/AlertsPage.tsx b/web/ui/mantine-ui/src/pages/AlertsPage.tsx index e960083062..852236f867 100644 --- a/web/ui/mantine-ui/src/pages/AlertsPage.tsx +++ b/web/ui/mantine-ui/src/pages/AlertsPage.tsx @@ -376,19 +376,15 @@ export default function AlertsPage() { {showAnnotations && ( - +
    {Object.entries( a.annotations ).map(([k, v]) => ( - {k} + + {k} + {v} ))} From 306d3576354009fac5054e9382ad79774fce37f7 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:54:41 +0200 Subject: [PATCH 0504/1397] Revert back to normal font for recording rule titles Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/RulesPage.tsx | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/RulesPage.tsx b/web/ui/mantine-ui/src/pages/RulesPage.tsx index 8247ef810a..a335228f3a 100644 --- a/web/ui/mantine-ui/src/pages/RulesPage.tsx +++ b/web/ui/mantine-ui/src/pages/RulesPage.tsx @@ -141,16 +141,7 @@ export default function RulesPage() { )} - - {r.name} - + {r.name} From e2be869a7ab98884efbdd5baa9691029ad9321da Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 11:59:36 +0200 Subject: [PATCH 0505/1397] Slightly improve navbar wrapping for long status title + narrow window Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 1b15f01800..dd69cff38c 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -339,9 +339,13 @@ function App() { padding="md" > - - - + + + Prometheus{agentMode && " Agent"} - + {navLinks} - + From 60ab1cc5a5a18fdab21a4bee15ba67e19a00d762 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 9 Sep 2024 12:43:02 +0200 Subject: [PATCH 0506/1397] BUGFIX: TSDB: panic in chunk querier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Followup to #14831 Signed-off-by: György Krajcsovits --- tsdb/head_test.go | 64 +++++++++++++++++++++++++++++++++++-------- tsdb/ooo_head_read.go | 9 ++++++ 2 files changed, 61 insertions(+), 12 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index c338ddaddc..4e9792a100 100644 
--- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3493,6 +3493,56 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) { } func TestQueryOOOHeadDuringTruncate(t *testing.T) { + testQueryOOOHeadDuringTruncate(t, + func(db *DB, minT, maxT int64) (storage.LabelQuerier, error) { + return db.Querier(minT, maxT) + }, + func(t *testing.T, lq storage.LabelQuerier, minT, _ int64) { + // Samples + q, ok := lq.(storage.Querier) + require.True(t, ok) + ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) // One series. + it := s.Iterator(nil) + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, int64(minT), it.AtT()) // It is an in-order sample. + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, int64(minT+50), it.AtT()) // it is an out-of-order sample. + require.NoError(t, it.Err()) + }, + ) +} + +func TestChunkQueryOOOHeadDuringTruncate(t *testing.T) { + testQueryOOOHeadDuringTruncate(t, + func(db *DB, minT, maxT int64) (storage.LabelQuerier, error) { + return db.ChunkQuerier(minT, maxT) + }, + func(t *testing.T, lq storage.LabelQuerier, minT, _ int64) { + // Chunks + q, ok := lq.(storage.ChunkQuerier) + require.True(t, ok) + ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) // One series. + metaIt := s.Iterator(nil) + require.True(t, metaIt.Next()) + meta := metaIt.At() + // Samples + it := meta.Chunk.Iterator(nil) + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, int64(minT), it.AtT()) // It is an in-order sample. + require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. + require.Equal(t, int64(minT+50), it.AtT()) // it is an out-of-order sample. 
+ require.NoError(t, it.Err()) + }, + ) +} + +func testQueryOOOHeadDuringTruncate(t *testing.T, makeQuerier func(db *DB, minT, maxT int64) (storage.LabelQuerier, error), verify func(t *testing.T, q storage.LabelQuerier, minT, maxT int64)) { const maxT int64 = 6000 dir := t.TempDir() @@ -3545,7 +3595,7 @@ func TestQueryOOOHeadDuringTruncate(t *testing.T) { // Wait for the compaction to start. <-allowQueryToStart - q, err := db.Querier(1500, 2500) + q, err := makeQuerier(db, 1500, 2500) require.NoError(t, err) queryStarted <- struct{}{} // Unblock the compaction. ctx := context.Background() @@ -3562,17 +3612,7 @@ func TestQueryOOOHeadDuringTruncate(t *testing.T) { require.Empty(t, annots) require.Equal(t, []string{"b"}, res) - // Samples - ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - require.True(t, ss.Next()) - s := ss.At() - require.False(t, ss.Next()) // One series. - it := s.Iterator(nil) - require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(1500), it.AtT()) // It is an in-order sample. - require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(1550), it.AtT()) // it is an out-of-order sample. - require.NoError(t, it.Err()) + verify(t, q, 1500, 2500) require.NoError(t, q.Close()) // Cannot be deferred as the compaction waits for queries to close before finishing. diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index a3c959bc43..66ae93325d 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -586,15 +586,24 @@ func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIso } func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + if q.querier == nil { + return nil, nil, nil + } return q.querier.LabelValues(ctx, name, hints, matchers...) 
} func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + if q.querier == nil { + return nil, nil, nil + } return q.querier.LabelNames(ctx, hints, matchers...) } func (q *HeadAndOOOChunkQuerier) Close() error { q.chunkr.Close() + if q.querier == nil { + return nil + } return q.querier.Close() } From d3f4e7c22319bd5dac664898950df7307b2ec676 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 9 Sep 2024 12:51:02 +0200 Subject: [PATCH 0507/1397] Remove unnecessary conversion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/head_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 4e9792a100..7b5349cfca 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3507,9 +3507,9 @@ func TestQueryOOOHeadDuringTruncate(t *testing.T) { require.False(t, ss.Next()) // One series. it := s.Iterator(nil) require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(minT), it.AtT()) // It is an in-order sample. + require.Equal(t, minT, it.AtT()) // It is an in-order sample. require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(minT+50), it.AtT()) // it is an out-of-order sample. + require.Equal(t, minT+50, it.AtT()) // it is an out-of-order sample. require.NoError(t, it.Err()) }, ) @@ -3534,9 +3534,9 @@ func TestChunkQueryOOOHeadDuringTruncate(t *testing.T) { // Samples it := meta.Chunk.Iterator(nil) require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. - require.Equal(t, int64(minT), it.AtT()) // It is an in-order sample. + require.Equal(t, minT, it.AtT()) // It is an in-order sample. require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data. 
- require.Equal(t, int64(minT+50), it.AtT()) // it is an out-of-order sample. + require.Equal(t, minT+50, it.AtT()) // it is an out-of-order sample. require.NoError(t, it.Err()) }, ) From 0a88943594b7c95d1a6ae4a9b06011690730b866 Mon Sep 17 00:00:00 2001 From: Julien Date: Fri, 6 Sep 2024 14:02:44 +0200 Subject: [PATCH 0508/1397] Scrape: test for q-value compliance with RFC 9110 in Accept header Signed-off-by: Julien --- scrape/scrape_test.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a69a19d7f7..637d5a79cd 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -34,6 +34,7 @@ import ( "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" + "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" @@ -2379,8 +2380,11 @@ func TestTargetScraperScrapeOK(t *testing.T) { expectedTimeout = "1.5" ) - var protobufParsing bool - var allowUTF8 bool + var ( + protobufParsing bool + allowUTF8 bool + qValuePattern = regexp.MustCompile(`q=([0-9]+(\.\d+)?)`) + ) server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -2393,6 +2397,17 @@ func TestTargetScraperScrapeOK(t *testing.T) { "Expected Accept header to prefer application/vnd.google.protobuf.") } + contentTypes := strings.Split(accept, ",") + for _, ct := range contentTypes { + match := qValuePattern.FindStringSubmatch(ct) + require.Len(t, match, 3) + qValue, err := strconv.ParseFloat(match[1], 64) + require.NoError(t, err, "Error parsing q value") + require.GreaterOrEqual(t, qValue, float64(0)) + require.LessOrEqual(t, qValue, float64(1)) + require.LessOrEqual(t, len(strings.Split(match[1], ".")[1]), 3, "q value should have at most 3 decimal places") + } + timeout := 
r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds") require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.") From 11f344e4d3c1afbba9fc4237b14c6f84f8cf0d01 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 14:32:27 +0200 Subject: [PATCH 0509/1397] Move AM discovery page from "Monitoring status" -> "Server status" Seems to at least make more sense to me like that. Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index dd69cff38c..9cba777029 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -109,13 +109,6 @@ const monitoringStatusPages = [ element: , inAgentMode: true, }, - { - title: "Alertmanager discovery", - path: "/discovered-alertmanagers", - icon: , - element: , - inAgentMode: false, - }, ]; const serverStatusPages = [ @@ -147,6 +140,13 @@ const serverStatusPages = [ element: , inAgentMode: true, }, + { + title: "Alertmanager discovery", + path: "/discovered-alertmanagers", + icon: , + element: , + inAgentMode: false, + }, ]; const allStatusPages = [...monitoringStatusPages, ...serverStatusPages]; From dadad9bd1040dc61047a1131732cefb0fd74802e Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Mon, 9 Sep 2024 16:26:28 +0200 Subject: [PATCH 0510/1397] Add changelog entries in preparation of 3.0-beta Signed-off-by: Jan Fajerski --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0813f44efd..fcb5d0c363 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,17 @@ _Please add changes here that are only in the release-3.0 branch. These will be ## unreleased * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. 
#14200 +* [FEATURE] Automatic reloading of the Prometheus configuration file at a specified interval #14769 * [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 +* [ENHANCEMENT] Put OM text p.CreatedTimestamp behind feature flag. #14815 +* [ENHANCEMENT] Improve promql traces with more context. #14816 +* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +* [BUGFIX] Do not re-use spans between histograms. #14771 +* [BUGFIX] Protobuf scraping: reset exemplar position. #14810 +* [BUGFIX] TSDB: fix panic in query during truncation with OOO head. #14831 +* [BUGFIX] TSDB: panic in chunk querier. #14874 +* [BUGFIX] promql.Engine.Close: No-op if nil. #14861 * [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 ## 2.54.1 / 2024-08-27 From 35ef7d41ce6a3dbb996a2afe922421ae0683800b Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Mon, 9 Sep 2024 11:22:20 -0400 Subject: [PATCH 0511/1397] remove rfratto as a tsdb/agent maintainer Signed-off-by: Robert Fratto --- MAINTAINERS.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 3661ddaa0a..7f4153abc1 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -13,13 +13,12 @@ Maintainers for specific parts of the codebase: * `k8s`: Frederic Branczyk ( / @brancz) * `documentation` * `prometheus-mixin`: Matthias Loibl ( / @metalmatze) -* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), +* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7), George Krajcsovits ( / @krajorama) * `storage` * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / 
@npazosmendez), Alex Greenbank ( / @alexgreenbank) * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez) * `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) - * `agent`: Robert Fratto ( / @rfratto) * `web` * `ui`: Julius Volz ( / @juliusv) * `module`: Augustin Husson ( @nexucis) From 785d595855d8920d4f007228398f29d53034118d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 9 Sep 2024 18:28:57 +0200 Subject: [PATCH 0512/1397] ui build: create requires web/ui/static dir ad hoc Signed-off-by: Jan Fajerski --- .gitignore | 3 +-- web/ui/build_ui.sh | 11 +++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index f4ed7242ba..0d99305f69 100644 --- a/.gitignore +++ b/.gitignore @@ -22,8 +22,7 @@ benchmark.txt /documentation/examples/remote_storage/example_write_adapter/example_write_adapter npm_licenses.tar.bz2 -/web/ui/static/react-app -/web/ui/static/mantine-ui +/web/ui/static /vendor /.build diff --git a/web/ui/build_ui.sh b/web/ui/build_ui.sh index 7046ca0596..360ec33ea8 100644 --- a/web/ui/build_ui.sh +++ b/web/ui/build_ui.sh @@ -21,6 +21,7 @@ then fi buildOrder=(lezer-promql codemirror-promql) +assetsDir="./static" function buildModule() { for module in "${buildOrder[@]}"; do @@ -32,15 +33,17 @@ function buildModule() { function buildReactApp() { echo "build react-app" (cd react-app && npm run build) - rm -rf ./static/react-app - mv ./react-app/build ./static/react-app + mkdir -p ${assetsDir} + rm -rf ${assetsDir}/react-app + mv ./react-app/build ${assetsDir}/react-app } function buildMantineUI() { echo "build mantine-ui" npm run build -w @prometheus-io/mantine-ui - rm -rf ./static/mantine-ui - mv ./mantine-ui/dist ./static/mantine-ui + mkdir -p ${assetsDir} + rm -rf ${assetsDir}/mantine-ui + mv ./mantine-ui/dist ${assetsDir}/mantine-ui } for i in "$@"; do From c1080990ac14602c9389c50f2110c411d24af182 Mon Sep 17 00:00:00 2001 From: 
Julius Volz Date: Mon, 9 Sep 2024 18:55:32 +0200 Subject: [PATCH 0513/1397] Bring back documentation link in the form of an action button IMO this looks nicer than adding it as a normal page nav link as in https://github.com/prometheus/prometheus/pull/14878 Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 29 ++++++++++++------- .../src/components/SettingsMenu.tsx | 7 ++++- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 9cba777029..99ed2316d3 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -6,6 +6,7 @@ import classes from "./App.module.css"; import PrometheusLogo from "./images/prometheus-logo.svg"; import { + ActionIcon, AppShell, Box, Burger, @@ -22,6 +23,7 @@ import { useDisclosure } from "@mantine/hooks"; import { IconBell, IconBellFilled, + IconBook, IconChevronDown, IconChevronRight, IconCloudDataConnection, @@ -306,17 +308,24 @@ function App() { ))} + + ); - {/* */} + + ); @@ -359,9 +368,8 @@ function App() { {navLinks} - - - + + {navActionIcons} {navLinks} - - + {navActionIcons} diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index 0d004bd4b2..c0631b1b47 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -18,7 +18,12 @@ const SettingsMenu: FC = () => { return ( - + From 569b6abfa3f876419879989d9e17d9a4f3cfc01b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=A1s=20Pazos?= Date: Mon, 9 Sep 2024 11:46:31 -0300 Subject: [PATCH 0514/1397] fix(utf8): propagate validationScheme config to scraping options MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Nicolás Pazos --- scrape/scrape.go | 1 + scrape/scrape_test.go | 85 +++++++++++++++++++++++++++++++------------ 2 files changed, 63 insertions(+), 23 deletions(-) diff --git a/scrape/scrape.go 
b/scrape/scrape.go index ea98432be6..26969fe28e 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -524,6 +524,7 @@ func (sp *scrapePool) sync(targets []*Target) { interval: interval, timeout: timeout, scrapeClassicHistograms: scrapeClassicHistograms, + validationScheme: validationScheme, }) if err != nil { l.setForcedError(err) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a69a19d7f7..608dd7bb74 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3114,18 +3114,7 @@ func TestScrapeReportLimit(t *testing.T) { ScrapeTimeout: model.Duration(100 * time.Millisecond), } - var ( - scrapes int - scrapedTwice = make(chan bool) - ) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n") - scrapes++ - if scrapes == 2 { - close(scrapedTwice) - } - })) + ts, scrapedTwice := newScrapableServer("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n") defer ts.Close() sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) @@ -3168,6 +3157,52 @@ func TestScrapeReportLimit(t *testing.T) { require.True(t, found) } +func TestScrapeUTF8(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + model.NameValidationScheme = model.UTF8Validation + t.Cleanup(func() { model.NameValidationScheme = model.LegacyValidation }) + + cfg := &config.ScrapeConfig{ + JobName: "test", + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + MetricNameValidationScheme: config.UTF8ValidationConfig, + } + ts, scrapedTwice := newScrapableServer("{\"with.dots\"} 42\n") + defer ts.Close() + + sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) + require.NoError(t, err) + defer sp.stop() + + testURL, err := url.Parse(ts.URL) + require.NoError(t, err) + sp.Sync([]*targetgroup.Group{ + { + Targets: 
[]model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, + }, + }) + + select { + case <-time.After(5 * time.Second): + t.Fatalf("target was not scraped twice") + case <-scrapedTwice: + // If the target has been scraped twice, report samples from the first + // scrape have been inserted in the database. + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "with.dots")) + + require.True(t, series.Next(), "series not found in tsdb") +} + func TestScrapeLoopLabelLimit(t *testing.T) { tests := []struct { title string @@ -3364,16 +3399,7 @@ test_summary_count 199 // The expected "quantile" values do not have the trailing ".0". expectedQuantileValues := []string{"0.5", "0.9", "0.95", "0.99", "1"} - scrapeCount := 0 - scraped := make(chan bool) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, metricsText) - scrapeCount++ - if scrapeCount > 2 { - close(scraped) - } - })) + ts, scrapedTwice := newScrapableServer(metricsText) defer ts.Close() sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) @@ -3392,7 +3418,7 @@ test_summary_count 199 select { case <-time.After(5 * time.Second): t.Fatalf("target was not scraped") - case <-scraped: + case <-scrapedTwice: } ctx, cancel := context.WithCancel(context.Background()) @@ -3841,3 +3867,16 @@ scrape_configs: require.Equal(t, expectedSchema, h.Schema) } } + +func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice chan bool) { + var scrapes int + scrapedTwice = make(chan bool) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, scrapeText) + scrapes++ + if scrapes == 2 { + close(scrapedTwice) + } + })), 
scrapedTwice +} From 928f093eb04240ee6a9548b4a61e41083493b524 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 10 Sep 2024 08:31:10 +0200 Subject: [PATCH 0515/1397] Update promci action Pick up new promci action to automatically configure `GOMEMLIMIT`. Signed-off-by: SuperQ --- .github/workflows/ci.yml | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a181bb7cc..1bb7d9a3ea 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,13 +12,9 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. image: quay.io/prometheus/golang-builder:1.23-base - env: - # Preliminary fix to make Go tests with race detector not use too much memory, - # see https://github.com/prometheus/prometheus/issues/14858. - GOMEMLIMIT: 10GiB steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/setup_environment - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false @@ -32,7 +28,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... 
- run: GOARCH=386 go test ./cmd/prometheus @@ -65,7 +61,7 @@ jobs: steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/setup_environment with: enable_go: false @@ -122,7 +118,7 @@ jobs: thread: [ 0, 1, 2 ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" @@ -145,7 +141,7 @@ jobs: # should also be updated. steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/build with: parallelism: 12 @@ -207,7 +203,7 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -221,7 +217,7 @@ jobs: if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - uses: ./.github/promci/actions/publish_release with: 
docker_hub_login: ${{ secrets.docker_hub_login }} @@ -236,7 +232,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 + - uses: prometheus/promci@1a7aaf104b2dcbe64ffd9f98073ca3f33ca616eb # v0.4.2 - name: Install nodejs uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: From 1b281f074e8736dc2b3f8c34e4c94370c62ec1d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 09:05:21 +0200 Subject: [PATCH 0516/1397] Bump @types/node from 22.5.2 to 22.5.4 in /web/ui (#14873) Bumps [@types/node](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node) from 22.5.2 to 22.5.4. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/node) --- updated-dependencies: - dependency-name: "@types/node" dependency-type: indirect update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/package-lock.json | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b3b85e6a6a..92bcf3dd34 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -2971,11 +2971,10 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.5.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.5.2.tgz", - "integrity": "sha512-acJsPTEqYqulZS/Yp/S3GgeE6GZ0qYODUR8aVr/DkhHQ8l9nd4j5x1/ZJy9/gHrRlFMqkO6i0I3E27Alu4jjPg==", + "version": "22.5.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.5.4.tgz", + "integrity": "sha512-FDuKUJQm/ju9fT/SeX/6+gBzoPzlVCzfzmGkwKvRHQVxi4BntVbyIwf6a4Xn62mrvndLiml6z/UBXIdEVjQLXg==", "dev": true, - "license": "MIT", "dependencies": { "undici-types": "~6.19.2" } From be0c0bd8476dbf6d4a008f0861b528f112cde1d7 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 10 Sep 2024 09:06:51 +0200 Subject: [PATCH 0517/1397] Fix error flood by downgrading OTel dependencies (#14884) Fixes #14859, although we'll have to see about a long-term fix. Hopefully it'll be fixed upstream with a follow-up version. 
Signed-off-by: Julius Volz --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 845e3277b8..4a2dd1c779 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/collector/pdata v1.14.1 go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 @@ -81,7 +81,7 @@ require ( golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 golang.org/x/tools v0.24.0 - google.golang.org/api v0.196.0 + google.golang.org/api v0.195.0 google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 diff --git a/go.sum b/go.sum index edb5b650bd..4fc4f93bd8 100644 --- a/go.sum +++ b/go.sum @@ -736,8 +736,8 @@ go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1 go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/otel v1.29.0 
h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= @@ -1056,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= -google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 500fcac03a20f7fc5ad6955cf131a7cc746abd1d Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Fri, 6 Sep 2024 17:53:59 +0100 Subject: [PATCH 0518/1397] Update CHANGELOG for beta Signed-off-by: Fiona Liao --- CHANGELOG.md | 32 +++++++++++++------- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 +-- web/ui/module/codemirror-promql/package.json | 4 +-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 4 +-- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 +-- 8 files changed, 32 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcb5d0c363..9efd1b82c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,28 +1,38 @@ # Changelog -## unreleased for 3.0.0 - -_Please add changes here that are only in the release-3.0 branch. 
These will be the source for the changelog when cutting the first release candidate for v3.0.0._ - +## 3.0.0-beta.0 / 2024-09-05 * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 * [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 - -## unreleased - +* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14770 +* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807 +* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770 +* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 +* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #14770, #14526 +* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 +* [FEATURE] Promtool: Allow additional labels to be added to blocks created from openmetrics. 
#14402 * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200 * [FEATURE] Automatic reloading of the Prometheus configuration file at a specified interval #14769 * [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 -* [ENHANCEMENT] Put OM text p.CreatedTimestamp behind feature flag. #14815 -* [ENHANCEMENT] Improve promql traces with more context. #14816 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +* [ENHANCEMENT] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815 +* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 +* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 +* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls.#14816 +* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 +* [BUGFIX] TSDB: Fix shard initialization after WAL repair.#14731 +* [BUGFIX] UTF-8: Ensure correct validation when legacy mode turned on. #14736 +* [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still defined jobs. #13147 +* [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. #13622 +* [BUGFIX] Remote Write 2.0: Ensure metadata records are sent from the WAL to remote write during WAL replay. 
#14766 +* [BUGFIX] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815 +* [BUGFIX] Scrape: Do no override target parameter labels with config params. #11029 +* [BUGFIX] Scrape: Reset exemplar position when scraping histograms in protobuf. #14810 * [BUGFIX] Do not re-use spans between histograms. #14771 -* [BUGFIX] Protobuf scraping: reset exemplar position. #14810 * [BUGFIX] TSDB: fix panic in query during truncation with OOO head. #14831 * [BUGFIX] TSDB: panic in chunk querier. #14874 * [BUGFIX] promql.Engine.Close: No-op if nil. #14861 -* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 ## 2.54.1 / 2024-08-27 diff --git a/VERSION b/VERSION index 4657faf0bd..7e9b524994 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0-alpha.0 +3.0.0-beta.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0cb3fdaafa..918a230bea 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.54.1", + "version": "3.0.0-beta.0", "type": "module", "scripts": { "start": "vite", @@ -27,7 +27,7 @@ "@mantine/notifications": "^7.11.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "^0.54.1", + "@prometheus-io/codemirror-promql": "3.0.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", "@tanstack/react-query": "^5.22.2", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index e8fe7c1806..da6a1b6224 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.54.1", + "version": 
"3.0.0-beta.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.1", + "@prometheus-io/lezer-promql": "3.0.0-beta.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 8093936dca..f1ffee36ea 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.54.1", + "version": "3.0.0-beta.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b3b85e6a6a..7ef24a08db 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.54.1", + "version": "3.0.0-beta.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.54.1", + "version": "3.0.0-beta.0", "workspaces": [ "mantine-ui", "module/*" diff --git a/web/ui/package.json b/web/ui/package.json index 9a39069dda..036d39e4a6 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.54.1", + "version": "3.0.0-beta.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c194e83358..475f3a6e0c 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "3.0.0-beta.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", @@ 
-19,7 +19,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.54.1", + "@prometheus-io/codemirror-promql": "3.0.0-beta.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From f729a704b8c20a2b1da7ac214563df125f084108 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Sat, 7 Sep 2024 18:33:18 +0100 Subject: [PATCH 0519/1397] Fix a couple of PR links in changelog Signed-off-by: Fiona Liao --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9efd1b82c7..894b9695ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,11 +4,11 @@ * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 * [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 -* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14770 +* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705 * [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807 * [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. 
The corresponding `new-service-discovery-manager` feature flag has been removed. #14770 * [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 -* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #14770, #14526 +* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 * [FEATURE] Promtool: Allow additional labels to be added to blocks created from openmetrics. #14402 * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200 @@ -21,7 +21,7 @@ * [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 * [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls.#14816 * [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 -* [BUGFIX] TSDB: Fix shard initialization after WAL repair.#14731 +* [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731 * [BUGFIX] UTF-8: Ensure correct validation when legacy mode turned on. #14736 * [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still defined jobs. #13147 * [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. 
#13622 From a2bf2bf4ef4b764d531b61d1da91131e7147e56b Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 9 Sep 2024 20:02:50 +0100 Subject: [PATCH 0520/1397] Change npm deps to 0.3xx Signed-off-by: Fiona Liao --- Makefile | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 4 ++-- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index ea66309b50..cf55c29628 100644 --- a/Makefile +++ b/Makefile @@ -42,7 +42,7 @@ upgrade-npm-deps: .PHONY: ui-bump-version ui-bump-version: - version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}" + version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}" cd web/ui && npm install git add "./web/ui/package-lock.json" "./**/package.json" diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 918a230bea..ec8ef89026 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "3.0.0-beta.0", + "version": "0.300.0-beta.0", "type": "module", "scripts": { "start": "vite", @@ -27,7 +27,7 @@ "@mantine/notifications": "^7.11.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "3.0.0-beta.0", + "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", "@tanstack/react-query": "^5.22.2", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index da6a1b6224..204e7cf34f 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "3.0.0-beta.0", + 
"version": "0.300.0-beta.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "3.0.0-beta.0", + "@prometheus-io/lezer-promql": "0.300.0-beta.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index f1ffee36ea..125c37b96a 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "3.0.0-beta.0", + "version": "0.300.0-beta.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 7ef24a08db..66b4889360 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "3.0.0-beta.0", + "version": "0.300.0-beta.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "3.0.0-beta.0", + "version": "0.300.0-beta.0", "workspaces": [ "mantine-ui", "module/*" diff --git a/web/ui/package.json b/web/ui/package.json index 036d39e4a6..dbe7b0d3cb 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "3.0.0-beta.0", + "version": "0.300.0-beta.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 475f3a6e0c..2f3373eb94 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "3.0.0-beta.0", + "version": "0.300.0-beta.0", "private": true, 
"dependencies": { "@codemirror/autocomplete": "^6.17.0", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "3.0.0-beta.0", + "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From 6a1c7cf70bc072c76af14528ce4608abeceec2d3 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 9 Sep 2024 21:02:12 +0100 Subject: [PATCH 0521/1397] Add release description and UI entry in changelog Signed-off-by: Fiona Liao Co-authored-by: Owen Williams --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 894b9695ac..4b4974fb01 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,12 @@ # Changelog ## 3.0.0-beta.0 / 2024-09-05 + +Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes. + +As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs. + +* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. 
#13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 * [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 From a5e079c6e4e5cb2f0aa24b6c8d8de07bad0688bb Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 9 Sep 2024 21:05:47 +0100 Subject: [PATCH 0522/1397] Add issue ref to UI update Signed-off-by: Fiona Liao --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b4974fb01..ca1fd72076 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 supp As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs. -* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. +* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872 * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. 
#13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 * [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 From 80d92f8c91270f525033e17c07bb1ccc2ad33e60 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 10 Sep 2024 10:22:23 +0100 Subject: [PATCH 0523/1397] Organise CHANGELOG Signed-off-by: Fiona Liao --- CHANGELOG.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca1fd72076..fe37e9bb17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,24 +21,24 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [FEATURE] Automatic reloading of the Prometheus configuration file at a specified interval #14769 * [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 -* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 * [ENHANCEMENT] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815 * [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 * [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 +* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". 
#14875 * [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls.#14816 -* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 * [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731 * [BUGFIX] UTF-8: Ensure correct validation when legacy mode turned on. #14736 * [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still defined jobs. #13147 * [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. #13622 * [BUGFIX] Remote Write 2.0: Ensure metadata records are sent from the WAL to remote write during WAL replay. #14766 -* [BUGFIX] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815 * [BUGFIX] Scrape: Do no override target parameter labels with config params. #11029 * [BUGFIX] Scrape: Reset exemplar position when scraping histograms in protobuf. #14810 -* [BUGFIX] Do not re-use spans between histograms. #14771 -* [BUGFIX] TSDB: fix panic in query during truncation with OOO head. #14831 -* [BUGFIX] TSDB: panic in chunk querier. #14874 +* [BUGFIX] Native Histograms: Do not re-use spans between histograms. #14771 +* [BUGFIX] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815 +* [BUGFIX] TSDB: Fix panic in query during truncation with OOO head. #14831 +* [BUGFIX] TSDB: Fix panic in chunk querier. #14874 * [BUGFIX] promql.Engine.Close: No-op if nil. #14861 +* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. 
#14042 ## 2.54.1 / 2024-08-27 From 1314c02a5f4e14695ffb3eaf4ee5ed76f1dd31c4 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 10 Sep 2024 10:30:20 +0100 Subject: [PATCH 0524/1397] Add additional changelog entries Signed-off-by: Fiona Liao --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe37e9bb17..08f102e157 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,10 +22,13 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 * [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 * [ENHANCEMENT] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815 +* [ENHANCEMENT] Scrape: Add support for logging scrape failures to a specified file. #14734 * [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 * [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 * [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls.#14816 +* [ENAHNCEMENT] Add support for multiple listening addresses. #14665 +* [ENAHNCEMENT] Add the ability to set custom HTTP headers. This was already present but was previously undocumented and buggy. #14817 * [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731 * [BUGFIX] UTF-8: Ensure correct validation when legacy mode turned on. 
#14736 * [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still defined jobs. #13147 From 63a842ef439e0bd103189cd3250146d714996b2b Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 10 Sep 2024 10:34:23 +0100 Subject: [PATCH 0525/1397] Fix npm deps Signed-off-by: Fiona Liao --- web/ui/package-lock.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 66b4889360..c554b51e23 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.54.1", + "version": "0.300.0-beta.0", "dependencies": { "@codemirror/autocomplete": "^6.18.0", "@codemirror/language": "^6.10.2", @@ -41,7 +41,7 @@ "@mantine/notifications": "^7.11.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "^0.54.1", + "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^2.47.0", "@tanstack/react-query": "^5.22.2", @@ -159,10 +159,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.54.1", + "version": "0.300.0-beta.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.1", + "@prometheus-io/lezer-promql": "0.300.0-beta.0", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -192,7 +192,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.54.1", + "version": "0.300.0-beta.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", From e1c2c51412f8e3fb642322dd813e68f9456b78e2 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 10 Sep 2024 10:38:01 +0100 Subject: [PATCH 0526/1397] Try manually updating react-app deps Signed-off-by: Fiona Liao --- web/ui/react-app/package-lock.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git 
a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index d456ca1f09..0fb4e10fc1 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "0.300.0-beta.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.54.1", + "version": "0.300.0-beta.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -24,7 +24,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.54.1", + "@prometheus-io/codemirror-promql": "0.300.0-beta.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", @@ -4341,12 +4341,12 @@ } }, "node_modules/@prometheus-io/codemirror-promql": { - "version": "0.54.1", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.54.1.tgz", + "version": "0.300.0-beta.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.300.0-beta.0.tgz", "integrity": "sha512-CkU5d+Nhbj+VjTYSlicIcFeL3KUYyEco/VHK+qM4TXgPQJxP04MCi642UVgLeuy9exThkCObj5oDJcApSNmxBw==", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.54.1", + "@prometheus-io/lezer-promql": "0.300.0-beta.0", "lru-cache": "^7.18.3" }, "engines": { @@ -4362,8 +4362,8 @@ } }, "node_modules/@prometheus-io/lezer-promql": { - "version": "0.54.1", - "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.54.1.tgz", + "version": "0.300.0-beta.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.300.0-beta.0.tgz", "integrity": "sha512-+QdeoN/PttM1iBeRtwSQWoaDIwnIgT9oIueTbAlvL01WM2eluD8j9vNiD0oJFzbcZ5clxwhvMP54InIt3vJaMg==", "license": "Apache-2.0", "peerDependencies": { From d33cdb9fbe851978b07e65efa572cc4754923ade Mon Sep 17 
00:00:00 2001 From: Fiona Liao Date: Tue, 10 Sep 2024 10:42:07 +0100 Subject: [PATCH 0527/1397] Revert "Try manually updating react-app deps" This reverts commit e1c2c51412f8e3fb642322dd813e68f9456b78e2. Signed-off-by: Fiona Liao --- web/ui/react-app/package-lock.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 0fb4e10fc1..d456ca1f09 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.300.0-beta.0", + "version": "0.54.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.300.0-beta.0", + "version": "0.54.1", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -24,7 +24,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.300.0-beta.0", + "@prometheus-io/codemirror-promql": "0.54.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", @@ -4341,12 +4341,12 @@ } }, "node_modules/@prometheus-io/codemirror-promql": { - "version": "0.300.0-beta.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.300.0-beta.0.tgz", + "version": "0.54.1", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.54.1.tgz", "integrity": "sha512-CkU5d+Nhbj+VjTYSlicIcFeL3KUYyEco/VHK+qM4TXgPQJxP04MCi642UVgLeuy9exThkCObj5oDJcApSNmxBw==", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-beta.0", + "@prometheus-io/lezer-promql": "0.54.1", "lru-cache": "^7.18.3" }, "engines": { @@ -4362,8 +4362,8 @@ } }, "node_modules/@prometheus-io/lezer-promql": { - "version": "0.300.0-beta.0", - "resolved": 
"https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.300.0-beta.0.tgz", + "version": "0.54.1", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.54.1.tgz", "integrity": "sha512-+QdeoN/PttM1iBeRtwSQWoaDIwnIgT9oIueTbAlvL01WM2eluD8j9vNiD0oJFzbcZ5clxwhvMP54InIt3vJaMg==", "license": "Apache-2.0", "peerDependencies": { From 8aab6458b8ac1e519f9ecee32ec33294166b28c9 Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 10 Sep 2024 12:12:00 +0200 Subject: [PATCH 0528/1397] Mantine UI: Move /discovered-alertmanagers to /alertmanager-discovery Signed-off-by: Julien --- web/ui/mantine-ui/src/App.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 9cba777029..789c4b97a0 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -142,7 +142,7 @@ const serverStatusPages = [ }, { title: "Alertmanager discovery", - path: "/discovered-alertmanagers", + path: "/alertmanager-discovery", icon: , element: , inAgentMode: false, From be6d443947ee65ae0c67222804495177ce90e2bc Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 10 Sep 2024 12:39:27 +0200 Subject: [PATCH 0529/1397] Mantine UI: Use actual lookback delta in explain Signed-off-by: Julien --- web/ui/mantine-ui/index.html | 8 +++++--- .../mantine-ui/src/pages/query/ExplainViews/Selector.tsx | 6 ++++-- web/ui/mantine-ui/src/state/settingsSlice.ts | 7 +++++++ web/web.go | 1 + 4 files changed, 17 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/index.html b/web/ui/mantine-ui/index.html index deb5f7f56d..d2723488ac 100644 --- a/web/ui/mantine-ui/index.html +++ b/web/ui/mantine-ui/index.html @@ -7,19 +7,21 @@ -```yaml - metric_relabel_configs: - - source_labels: - - quantile - target_label: quantile - regex: (\d+)\.0+ - - source_labels: - - le - - __name__ - target_label: le - regex: (\d+)\.0+;.*_bucket -``` - ## Experimental PromQL functions 
`--enable-feature=promql-experimental-functions` diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 3b0e9a96e1..1a8f3dc48f 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -23,8 +23,7 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -// Parser parses samples from a byte slice of samples in the official -// Prometheus and OpenMetrics text exposition formats. +// Parser parses samples from a byte slice of samples in different exposition formats. type Parser interface { // Series returns the bytes of a series with a simple float64 as a // value, the timestamp if set, and the value of the current sample. @@ -58,6 +57,8 @@ type Parser interface { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. + // The values of the "le" labels of classic histograms and "quantile" labels + // of summaries should follow the OpenMetrics formatting rules. Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 13629e66db..8d3ad75c18 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "math" + "strconv" "strings" "unicode/utf8" @@ -210,7 +211,7 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) p.builder.Add(label, value) } @@ -724,3 +725,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } + +// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels +// of summaries follow OpenMetrics formatting rules. 
+func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string { + if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) { + f, err := strconv.ParseFloat(v, 64) + if err == nil { + return formatOpenMetricsFloat(f) + } + } + return v +} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index 467a237718..9c3c679ab5 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -74,6 +74,7 @@ foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 foo_created{a="b"} 1520872607.123 foo_total{le="c"} 21.0 foo_created{le="c"} 1520872621.123 +foo_total{le="1"} 10.0 # HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far # TYPE bar summary bar_count 17.0 @@ -97,6 +98,7 @@ something_count 18 something_sum 324789.4 something_created 1520430001 something_bucket{le="0.0"} 1 +something_bucket{le="1"} 2 something_bucket{le="+Inf"} 18 # HELP yum Summary with _created between sum and quantiles # TYPE yum summary @@ -130,7 +132,7 @@ foobar{quantile="0.99"} 150.1` }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), }, { m: `go_gc_duration_seconds{quantile="0.25"}`, v: 7.424100000000001e-05, @@ -302,6 +304,10 @@ foobar{quantile="0.99"} 150.1` v: 21.0, lset: labels.FromStrings("__name__", "foo_total", "le", "c"), ct: int64p(1520872621123), + }, { + m: `foo_total{le="1"}`, + v: 10.0, + lset: labels.FromStrings("__name__", "foo_total", "le", "1"), }, { m: "bar", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", @@ -385,6 +391,11 @@ foobar{quantile="0.99"} 150.1` v: 1, lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"), ct: 
int64p(1520430001000), + }, { + m: `something_bucket{le="1"}`, + v: 2, + lset: labels.FromStrings("__name__", "something_bucket", "le", "1.0"), + ct: int64p(1520430001000), }, { m: `something_bucket{le="+Inf"}`, v: 18, @@ -492,7 +503,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { }, { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"), ct: int64p(1520872607123), }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 5759769279..0ab932c665 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -239,7 +239,8 @@ func (p *PromParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) + p.builder.Add(label, value) } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index b726d8847a..e8cf66f539 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -31,6 +31,13 @@ go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05 go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05 go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05 go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05 +# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests. 
+# TYPE prometheus_http_request_duration_seconds histogram +prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 423 +prometheus_http_request_duration_seconds_bucket{handler="/",le="2"} 1423 +prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 1423 +prometheus_http_request_duration_seconds_sum{handler="/"} 2000 +prometheus_http_request_duration_seconds_count{handler="/"} 1423 # Hrandom comment starting with prefix of HELP # wind_speed{A="2",c="3"} 12345 @@ -50,7 +57,8 @@ some:aggregate:rate5m{a_b="c"} 1 go_goroutines 33 123123 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 -testmetric{label="\"bar\""} 1` +testmetric{label="\"bar\""} 1 +testmetric{le="10"} 1` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" @@ -64,7 +72,7 @@ testmetric{label="\"bar\""} 1` }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), }, { m: `go_gc_duration_seconds{quantile="0.25",}`, v: 7.424100000000001e-05, @@ -81,6 +89,32 @@ testmetric{label="\"bar\""} 1` m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`, v: 8.3835e-05, lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"), + }, { + m: "prometheus_http_request_duration_seconds", + help: "Histogram of latencies for HTTP requests.", + }, { + m: "prometheus_http_request_duration_seconds", + typ: model.MetricTypeHistogram, + }, { + m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="1"}`, + v: 423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "1.0"), + }, { + m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="2"}`, + v: 1423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", 
"handler", "/", "le", "2.0"), + }, { + m: `prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"}`, + v: 1423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_bucket", "handler", "/", "le", "+Inf"), + }, { + m: `prometheus_http_request_duration_seconds_sum{handler="/"}`, + v: 2000, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_sum", "handler", "/"), + }, { + m: `prometheus_http_request_duration_seconds_count{handler="/"}`, + v: 1423, + lset: labels.FromStrings("__name__", "prometheus_http_request_duration_seconds_count", "handler", "/"), }, { comment: "# Hrandom comment starting with prefix of HELP", }, { @@ -151,6 +185,10 @@ testmetric{label="\"bar\""} 1` m: "testmetric{label=\"\\\"bar\\\"\"}", v: 1, lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: `testmetric{le="10"}`, + v: 1, + lset: labels.FromStrings("__name__", "testmetric", "le", "10"), }, { m: "metric", help: "foo\x00bar", @@ -197,7 +235,7 @@ func TestUTF8PromParse(t *testing.T) { }, { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.0"), }, { m: `{"go.gc_duration_seconds",quantile="0.25",}`, v: 7.424100000000001e-05, From cf128a04727cc232d8d96a1be8e089c11b7e3c88 Mon Sep 17 00:00:00 2001 From: machine424 Date: Wed, 7 Aug 2024 19:14:59 +0200 Subject: [PATCH 0790/1397] test(cmd/prometheus): speed up test execution by t.Parallel() when possible turn some loops into subtests to make use of t.Parallel() requires Go 1.22 to make use of https://go.dev/blog/loopvar-preview Signed-off-by: machine424 --- cmd/prometheus/main_test.go | 171 ++++++++++++++++++++----------- cmd/prometheus/main_unix_test.go | 1 + cmd/prometheus/query_log_test.go | 2 + 3 files changed, 116 insertions(+), 58 deletions(-) diff --git 
a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index d0c2846bec..4bd1c71b2d 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -125,6 +125,7 @@ func TestFailedStartupExitCode(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() fakeInputFile := "fake-input-file" expectedExitStatus := 2 @@ -211,83 +212,125 @@ func TestWALSegmentSizeBounds(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() - for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) + for _, tc := range []struct { + size string + exitCode int + }{ + { + size: "9MB", + exitCode: 1, + }, + { + size: "257MB", + exitCode: 1, + }, + { + size: "10", + exitCode: 2, + }, + { + size: "1GB", + exitCode: 1, + }, + { + size: "12MB", + exitCode: 0, + }, + } { + t.Run(tc.size, func(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - // Log stderr in case of failure. - stderr, err := prom.StderrPipe() - require.NoError(t, err) - go func() { - slurp, _ := io.ReadAll(stderr) - t.Log(string(slurp)) - }() + // Log stderr in case of failure. 
+ stderr, err := prom.StderrPipe() + require.NoError(t, err) + go func() { + slurp, _ := io.ReadAll(stderr) + t.Log(string(slurp)) + }() - err = prom.Start() - require.NoError(t, err) + err = prom.Start() + require.NoError(t, err) - if expectedExitStatus == 0 { - done := make(chan error, 1) - go func() { done <- prom.Wait() }() - select { - case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) - case <-time.After(startupTime): - prom.Process.Kill() - <-done + if tc.exitCode == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + require.Fail(t, "prometheus should be still running: %v", err) + case <-time.After(startupTime): + prom.Process.Kill() + <-done + } + return } - continue - } - err = prom.Wait() - require.Error(t, err) - var exitError *exec.ExitError - require.ErrorAs(t, err, &exitError) - status := exitError.Sys().(syscall.WaitStatus) - require.Equal(t, expectedExitStatus, status.ExitStatus()) + err = prom.Wait() + require.Error(t, err) + var exitError *exec.ExitError + require.ErrorAs(t, err, &exitError) + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitCode, status.ExitStatus()) + }) } } func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { - t.Parallel() - if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() - for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} { - prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) + for _, tc := range []struct { + size string + exitCode int + }{ + { + size: "512KB", + exitCode: 1, + }, + { + size: "1MB", + exitCode: 0, + }, + } { + t.Run(tc.size, func(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, 
"--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data")) - // Log stderr in case of failure. - stderr, err := prom.StderrPipe() - require.NoError(t, err) - go func() { - slurp, _ := io.ReadAll(stderr) - t.Log(string(slurp)) - }() + // Log stderr in case of failure. + stderr, err := prom.StderrPipe() + require.NoError(t, err) + go func() { + slurp, _ := io.ReadAll(stderr) + t.Log(string(slurp)) + }() - err = prom.Start() - require.NoError(t, err) + err = prom.Start() + require.NoError(t, err) - if expectedExitStatus == 0 { - done := make(chan error, 1) - go func() { done <- prom.Wait() }() - select { - case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) - case <-time.After(startupTime): - prom.Process.Kill() - <-done + if tc.exitCode == 0 { + done := make(chan error, 1) + go func() { done <- prom.Wait() }() + select { + case err := <-done: + require.Fail(t, "prometheus should be still running: %v", err) + case <-time.After(startupTime): + prom.Process.Kill() + <-done + } + return } - continue - } - err = prom.Wait() - require.Error(t, err) - var exitError *exec.ExitError - require.ErrorAs(t, err, &exitError) - status := exitError.Sys().(syscall.WaitStatus) - require.Equal(t, expectedExitStatus, status.ExitStatus()) + err = prom.Wait() + require.Error(t, err) + var exitError *exec.ExitError + require.ErrorAs(t, err, &exitError) + status := exitError.Sys().(syscall.WaitStatus) + require.Equal(t, tc.exitCode, status.ExitStatus()) + }) } } @@ -353,6 +396,8 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) @@ -371,6 +416,8 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t 
*testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} @@ -398,6 +445,8 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { + t.Parallel() + prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) @@ -419,6 +468,7 @@ func TestModeSpecificFlags(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() testcases := []struct { mode string @@ -433,6 +483,7 @@ func TestModeSpecificFlags(t *testing.T) { for _, tc := range testcases { t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { + t.Parallel() args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { @@ -484,6 +535,8 @@ func TestDocumentation(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -508,6 +561,8 @@ func TestDocumentation(t *testing.T) { } func TestRwProtoMsgFlagParser(t *testing.T) { + t.Parallel() + defaultOpts := config.RemoteWriteProtoMsgs{ config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2, } diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index 2011fb123f..94eec27e79 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t)) diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index f05ad9df2a..25abf5e965 100644 --- a/cmd/prometheus/query_log_test.go +++ 
b/cmd/prometheus/query_log_test.go @@ -456,6 +456,7 @@ func TestQueryLog(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() cwd, err := os.Getwd() require.NoError(t, err) @@ -474,6 +475,7 @@ func TestQueryLog(t *testing.T) { } t.Run(p.String(), func(t *testing.T) { + t.Parallel() p.run(t) }) } From 482bb453c6f61c8f4ed4f05c9a1cbdc60cc1f3c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 11:03:07 +0200 Subject: [PATCH 0791/1397] Followup to #15164 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update test cases Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index a52e8637e8..7cff217126 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -131,7 +131,7 @@ foobar{quantile="0.99"} 150.1` }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, - lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), + lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.0"), }, { m: `go_gc_duration_seconds{quantile="0.25"}`, v: 7.424100000000001e-05, From 70742a64aa97762a062e3595e0c0cf50788bee6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 11:03:47 +0200 Subject: [PATCH 0792/1397] Follow up #15178 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Renaming Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index fef4d0b7fa..35d5f14ac9 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3844,24 +3844,24 @@ metric: < for metricsTextName, metricsText := 
range metricsTexts { for name, tc := range map[string]struct { - scrapeClassicHistograms bool - convertClassicHistograms bool + alwaysScrapeClassicHistograms bool + convertClassicHistograms bool }{ "convert with scrape": { - scrapeClassicHistograms: true, - convertClassicHistograms: true, + alwaysScrapeClassicHistograms: true, + convertClassicHistograms: true, }, "convert without scrape": { - scrapeClassicHistograms: false, - convertClassicHistograms: true, + alwaysScrapeClassicHistograms: false, + convertClassicHistograms: true, }, "scrape without convert": { - scrapeClassicHistograms: true, - convertClassicHistograms: false, + alwaysScrapeClassicHistograms: true, + convertClassicHistograms: false, }, "neither scrape nor convert": { - scrapeClassicHistograms: false, - convertClassicHistograms: false, + alwaysScrapeClassicHistograms: false, + convertClassicHistograms: false, }, } { var expectedClassicHistCount, expectedNativeHistCount int @@ -3870,16 +3870,16 @@ metric: < expectedNativeHistCount = 1 expectCustomBuckets = false expectedClassicHistCount = 0 - if metricsText.hasClassic && tc.scrapeClassicHistograms { + if metricsText.hasClassic && tc.alwaysScrapeClassicHistograms { expectedClassicHistCount = 1 } } else if metricsText.hasClassic { switch { - case tc.scrapeClassicHistograms && tc.convertClassicHistograms: + case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms: expectedClassicHistCount = 1 expectedNativeHistCount = 1 expectCustomBuckets = true - case !tc.scrapeClassicHistograms && tc.convertClassicHistograms: + case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms: expectedClassicHistCount = 0 expectedNativeHistCount = 1 expectCustomBuckets = true @@ -3894,13 +3894,13 @@ metric: < defer simpleStorage.Close() config := &config.ScrapeConfig{ - JobName: "test", - SampleLimit: 100, - Scheme: "http", - ScrapeInterval: model.Duration(100 * time.Millisecond), - ScrapeTimeout: model.Duration(100 * time.Millisecond), - 
ScrapeClassicHistograms: tc.scrapeClassicHistograms, - ConvertClassicHistograms: tc.convertClassicHistograms, + JobName: "test", + SampleLimit: 100, + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms, + ConvertClassicHistograms: tc.convertClassicHistograms, } scrapeCount := 0 From a23aed5634169ad400947a7ff91955d6a64cb5b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 11:10:50 +0200 Subject: [PATCH 0793/1397] More followup to #15164 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Scrape test for NHCB modified. Signed-off-by: György Krajcsovits --- scrape/scrape_test.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 35d5f14ac9..6187119bfd 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3758,17 +3758,11 @@ metric: < }, } - checkBucketValues := func(expectedCount int, contentType string, series storage.SeriesSet) { + checkBucketValues := func(expectedCount int, series storage.SeriesSet) { labelName := "le" var expectedValues []string if expectedCount > 0 { - if contentType == "application/vnd.google.protobuf" { - // The expected "le" values have the trailing ".0". - expectedValues = []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1.0", "2.5", "5.0", "10.0", "+Inf"} - } else { - // The expected "le" values do not have the trailing ".0". - expectedValues = []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1", "2.5", "5", "10", "+Inf"} - } + expectedValues = []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1.0", "2.5", "5.0", "10.0", "+Inf"} } foundLeValues := map[string]bool{} @@ -3984,7 +3978,7 @@ metric: < checkFloatSeries(series, expectedClassicHistCount, 10.) 
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_bucket", i))) - checkBucketValues(expectedClassicHistCount, metricsText.contentType, series) + checkBucketValues(expectedClassicHistCount, series) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d", i))) From 4283ae73dcc3439d851c227dc770310f20f24e40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 13:22:58 +0200 Subject: [PATCH 0794/1397] Rename convert_classic_histograms to convert_classic_histograms_to_nhcb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On reviewer request. Signed-off-by: György Krajcsovits --- config/config.go | 2 +- scrape/manager.go | 2 +- scrape/scrape.go | 19 +++++++++++-------- scrape/scrape_test.go | 32 ++++++++++++++++---------------- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/config/config.go b/config/config.go index 962a0f4a73..657c4fc759 100644 --- a/config/config.go +++ b/config/config.go @@ -656,7 +656,7 @@ type ScrapeConfig struct { // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` // Whether to convert all scraped classic histograms into a native histogram with custom buckets. - ConvertClassicHistograms bool `yaml:"convert_classic_histograms,omitempty"` + ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` // File to which scrape failures are logged. ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. 
diff --git a/scrape/manager.go b/scrape/manager.go index 9791db0e8b..f3dad2a048 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -178,7 +178,7 @@ func (m *Manager) reload() { m.logger.Error("error reloading target set", "err", "invalid config id:"+setName) continue } - if scrapeConfig.ConvertClassicHistograms && m.opts.EnableCreatedTimestampZeroIngestion { + if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion { // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137 m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137") continue diff --git a/scrape/scrape.go b/scrape/scrape.go index 290855b3a3..c252d57f69 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -113,7 +113,7 @@ type scrapeLoopOptions struct { interval time.Duration timeout time.Duration alwaysScrapeClassicHist bool - convertClassicHistograms bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme fallbackScrapeProtocol string @@ -182,7 +182,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.interval, opts.timeout, opts.alwaysScrapeClassicHist, - opts.convertClassicHistograms, + opts.convertClassicHistToNHCB, options.EnableNativeHistogramsIngestion, options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, @@ -488,7 +488,7 @@ func (sp *scrapePool) sync(targets []*Target) { mrc = sp.config.MetricRelabelConfigs fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms - convertClassicHistograms = sp.config.ConvertClassicHistograms + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB ) validationScheme := model.UTF8Validation @@ -530,7 +530,7 @@ func (sp *scrapePool) 
sync(targets []*Target) { interval: interval, timeout: timeout, alwaysScrapeClassicHist: alwaysScrapeClassicHist, - convertClassicHistograms: convertClassicHistograms, + convertClassicHistToNHCB: convertClassicHistToNHCB, validationScheme: validationScheme, fallbackScrapeProtocol: fallbackScrapeProtocol, }) @@ -894,7 +894,7 @@ type scrapeLoop struct { interval time.Duration timeout time.Duration alwaysScrapeClassicHist bool - convertClassicHistograms bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme fallbackScrapeProtocol string @@ -1196,7 +1196,7 @@ func newScrapeLoop(ctx context.Context, interval time.Duration, timeout time.Duration, alwaysScrapeClassicHist bool, - convertClassicHistograms bool, + convertClassicHistToNHCB bool, enableNativeHistogramIngestion bool, enableCTZeroIngestion bool, reportExtraMetrics bool, @@ -1252,7 +1252,7 @@ func newScrapeLoop(ctx context.Context, interval: interval, timeout: timeout, alwaysScrapeClassicHist: alwaysScrapeClassicHist, - convertClassicHistograms: convertClassicHistograms, + convertClassicHistToNHCB: convertClassicHistToNHCB, enableNativeHistogramIngestion: enableNativeHistogramIngestion, enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, @@ -1563,7 +1563,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ) return } - if sl.convertClassicHistograms { + if sl.convertClassicHistToNHCB { p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist) } if err != nil { @@ -1751,6 +1751,9 @@ loop: } else { ref, err = app.AppendHistogram(ref, lset, t, nil, fh) } + if err != nil { + fmt.Printf("Error when appending histogram in scrape loop: %s\n", err) + } } else { ref, err = app.Append(ref, lset, t, val) } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 6187119bfd..9a70d74117 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3478,7 +3478,7 @@ test_summary_count 199 } // Testing whether 
we can automatically convert scraped classic histograms into native histograms with custom buckets. -func TestConvertClassicHistograms(t *testing.T) { +func TestConvertClassicHistogramsToNHCB(t *testing.T) { genTestCounterText := func(name string, value int, withMetadata bool) string { if withMetadata { return fmt.Sprintf(` @@ -3839,23 +3839,23 @@ metric: < for metricsTextName, metricsText := range metricsTexts { for name, tc := range map[string]struct { alwaysScrapeClassicHistograms bool - convertClassicHistograms bool + convertClassicHistToNHCB bool }{ "convert with scrape": { alwaysScrapeClassicHistograms: true, - convertClassicHistograms: true, + convertClassicHistToNHCB: true, }, "convert without scrape": { alwaysScrapeClassicHistograms: false, - convertClassicHistograms: true, + convertClassicHistToNHCB: true, }, "scrape without convert": { alwaysScrapeClassicHistograms: true, - convertClassicHistograms: false, + convertClassicHistToNHCB: false, }, "neither scrape nor convert": { alwaysScrapeClassicHistograms: false, - convertClassicHistograms: false, + convertClassicHistToNHCB: false, }, } { var expectedClassicHistCount, expectedNativeHistCount int @@ -3869,15 +3869,15 @@ metric: < } } else if metricsText.hasClassic { switch { - case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms: + case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB: expectedClassicHistCount = 1 expectedNativeHistCount = 1 expectCustomBuckets = true - case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms: + case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB: expectedClassicHistCount = 0 expectedNativeHistCount = 1 expectCustomBuckets = true - case !tc.convertClassicHistograms: + case !tc.convertClassicHistToNHCB: expectedClassicHistCount = 1 expectedNativeHistCount = 0 } @@ -3888,13 +3888,13 @@ metric: < defer simpleStorage.Close() config := &config.ScrapeConfig{ - JobName: "test", - SampleLimit: 100, - Scheme: "http", - 
ScrapeInterval: model.Duration(100 * time.Millisecond), - ScrapeTimeout: model.Duration(100 * time.Millisecond), - AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms, - ConvertClassicHistograms: tc.convertClassicHistograms, + JobName: "test", + SampleLimit: 100, + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms, + ConvertClassicHistogramsToNHCB: tc.convertClassicHistToNHCB, } scrapeCount := 0 From 5ee0980cd1d50881f9693a85c486f25bca2976da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 13:35:33 +0200 Subject: [PATCH 0795/1397] Add unit test to show that current wrapper is sub-optimal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://github.com/prometheus/prometheus/pull/14978#discussion_r1800755481 Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse_test.go | 175 +++++++++++++++++++++++++++++- 1 file changed, 174 insertions(+), 1 deletion(-) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 7cff217126..37fcccb9d5 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -14,13 +14,18 @@ package textparse import ( + "bytes" + "encoding/binary" "testing" - "github.com/prometheus/common/model" + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) func TestNHCBParserOnOMParser(t *testing.T) { @@ -513,3 +518,171 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 got := testParse(t, p) requireEntries(t, exp, got) } + +// Verify that the 
NHCBParser does not parse the NHCB when the exponential is present. +func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { + inputBuf := createTestProtoBufHistogram(t) + // Initialize the protobuf parser so that it returns classic histograms as + // well when there's both classic and exponential histograms. + p := NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()) + + // Initialize the NHCBParser so that it returns classic histograms as well + // when there's both classic and exponential histograms. + p = NewNHCBParser(p, labels.NewSymbolTable(), true) + + exp := []parsedEntry{ + { + m: "test_histogram", + help: "Test histogram with classic and exponential buckets.", + }, + { + m: "test_histogram", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram", + shs: &histogram.Histogram{ + Schema: 3, + Count: 175, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + ZeroCount: 2, + PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, + NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings("__name__", "test_histogram"), + t: int64p(1234568), + }, + { + m: "test_histogram_count", + v: 175, + lset: labels.FromStrings("__name__", "test_histogram_count"), + t: int64p(1234568), + }, + { + m: "test_histogram_sum", + v: 0.0008280461746287094, + lset: labels.FromStrings("__name__", "test_histogram_sum"), + t: int64p(1234568), + }, + { + m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", + v: 2, + lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), + t: int64p(1234568), + }, + { + m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", + v: 4, + lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), + t: int64p(1234568), + }, + { + m: 
"test_histogram_bucket\xffle\xff-0.0002899999999999998", + v: 16, + lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), + t: int64p(1234568), + }, + { + m: "test_histogram_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), + t: int64p(1234568), + }, + { + // TODO(krajorama): optimize: this should not be here. In case there's + // an exponential histogram we should not scrape the classic histogram. + // TSDB will throw this away with storage.errDuplicateSampleForTimestamp + // at Commit(), but it needs to be parsed here after the exponential + // histogram. + m: "test_histogram{}", + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 175, + Sum: 0.0008280461746287094, + PositiveSpans: []histogram.Span{{Length: 4}}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + }, + lset: labels.FromStrings("__name__", "test_histogram"), + t: int64p(1234568), + }, + } + got := testParse(t, p) + requireEntries(t, exp, got) +} + +func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer { + testMetricFamilies := []string{`name: "test_histogram" +help: "Test histogram with classic and exponential buckets." 
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> +`} + + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := &bytes.Buffer{} + + for _, tmf := range testMetricFamilies { + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(tmf, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. 
+ varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + return buf +} From eaee6bacc7960e3f00fd057458228fce28dddd21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 13:40:16 +0200 Subject: [PATCH 0796/1397] Fix failing benchmarks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/benchmark_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index 98aadb0ed4..bc9c2d1db1 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -40,7 +40,7 @@ var newTestParserFns = map[string]newParser{ "omtext": func(b []byte, st *labels.SymbolTable) Parser { return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) }, - "omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) Parser { + "nhcb_over_omtext": func(b []byte, st *labels.SymbolTable) Parser { p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) return NewNHCBParser(p, st, false) }, From a6947e1e6da848c2b1f83d5144eb6ac5a8083e5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 13:45:33 +0200 Subject: [PATCH 0797/1397] Remove omcounterdata.txt as redundant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/benchmark_test.go | 2 -- model/textparse/testdata/omcounterdata.txt | 9 --------- 2 files changed, 11 deletions(-) delete mode 100644 model/textparse/testdata/omcounterdata.txt diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index bc9c2d1db1..f6d3a95590 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -86,8 +86,6 @@ func BenchmarkParse(b *testing.B) { // 
NHCB. {dataFile: "omhistogramdata.txt", parser: "omtext"}, // Measure OM parser baseline for histograms. {dataFile: "omhistogramdata.txt", parser: "nhcb_over_omtext"}, // Measure NHCB over OM parser. - {dataFile: "omcounterdata.txt", parser: "omtext"}, // Measure OM parser baseline for counters. - {dataFile: "omcounterdata.txt", parser: "nhcb_over_omtext"}, // Measure NHCB over OM parser. } { var buf []byte dataCase := bcase.dataFile diff --git a/model/textparse/testdata/omcounterdata.txt b/model/textparse/testdata/omcounterdata.txt deleted file mode 100644 index 15459c018e..0000000000 --- a/model/textparse/testdata/omcounterdata.txt +++ /dev/null @@ -1,9 +0,0 @@ -# HELP rpc_requests Total number of RPC requests received. -# TYPE rpc_requests counter -rpc_requests_total{service="exponential"} 22.0 -rpc_requests_created{service="exponential"} 1.726839813016893e+09 -rpc_requests_total{service="normal"} 15.0 -rpc_requests_created{service="normal"} 1.726839813016717e+09 -rpc_requests_total{service="uniform"} 11.0 -rpc_requests_created{service="uniform"} 1.7268398130168471e+09 -# EOF From 555bd6292a1be32aa546731c6913a7ff19fe8311 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 13:48:21 +0200 Subject: [PATCH 0798/1397] Better docstring on test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 37fcccb9d5..80d8466467 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -593,10 +593,10 @@ func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { }, { // TODO(krajorama): optimize: this should not be here. In case there's - // an exponential histogram we should not scrape the classic histogram. 
- // TSDB will throw this away with storage.errDuplicateSampleForTimestamp - // at Commit(), but it needs to be parsed here after the exponential - // histogram. + // an exponential histogram we should not convert the classic histogram + // to NHCB. In the end TSDB will throw this away with + // storage.errDuplicateSampleForTimestamp error at Commit(), but it + // is better to avoid this conversion in the first place. m: "test_histogram{}", shs: &histogram.Histogram{ Schema: histogram.CustomBucketsSchema, From bee1eb77206f9973b0b9d2528b6c2b7f0506223f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 14:02:32 +0200 Subject: [PATCH 0799/1397] goimports run MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 80d8466467..80b65fd225 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" From 25ef4d34839c7cca87fec09d4c616e1ada9dce78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 21 Oct 2024 15:40:48 +0200 Subject: [PATCH 0800/1397] benchmark, rename parser omtext_with_nhcb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- model/textparse/benchmark_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index f6d3a95590..bd0d5089ac 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ 
-40,7 +40,7 @@ var newTestParserFns = map[string]newParser{ "omtext": func(b []byte, st *labels.SymbolTable) Parser { return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) }, - "nhcb_over_omtext": func(b []byte, st *labels.SymbolTable) Parser { + "omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) Parser { p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) return NewNHCBParser(p, st, false) }, @@ -85,7 +85,7 @@ func BenchmarkParse(b *testing.B) { // NHCB. {dataFile: "omhistogramdata.txt", parser: "omtext"}, // Measure OM parser baseline for histograms. - {dataFile: "omhistogramdata.txt", parser: "nhcb_over_omtext"}, // Measure NHCB over OM parser. + {dataFile: "omhistogramdata.txt", parser: "omtext_with_nhcb"}, // Measure NHCB over OM parser. } { var buf []byte dataCase := bcase.dataFile From 877fd2a60e027732c54490049a3a849657d5e08c Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 21 Oct 2024 16:01:34 +0200 Subject: [PATCH 0801/1397] Update scrape/scrape.go Signed-off-by: George Krajcsovits --- scrape/scrape.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index c252d57f69..f5f02d245f 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1751,9 +1751,6 @@ loop: } else { ref, err = app.AppendHistogram(ref, lset, t, nil, fh) } - if err != nil { - fmt.Printf("Error when appending histogram in scrape loop: %s\n", err) - } } else { ref, err = app.Append(ref, lset, t, val) } From d2802c6facf16dee253135f839aff071836b1665 Mon Sep 17 00:00:00 2001 From: Yijie Qin Date: Mon, 21 Oct 2024 19:04:40 -0400 Subject: [PATCH 0802/1397] api: Add rule group pagination to list rules api (#14017) * Add paginated feature to list rules api Signed-off-by: Yijie Qin * Refactor to simplify code: * Reduce number of variables * Reduce type convesion Signed-off-by: Raphael Silva * Simplify paginated implementation * Remove maxAlerts parameter. 
* Reuse existing API responses by using omitempty in some fields Signed-off-by: Raphael Silva * Simplify pagination implementation * Eliminate the need to sort the rule groups. Signed-off-by: Raphael Silva * Fix linting error Signed-off-by: Raphael Silva * Add more unit tests Signed-off-by: Raphael Silva * Update pagination parameters to be consistent with existing parameters Signed-off-by: Raphael Silva * Rename max_rule_groups to max_groups Signed-off-by: Raphael Silva * Refactor to simplify code Signed-off-by: Raphael Silva * Refactor to simplify the calculation of next token Signed-off-by: Raphael Silva * Handle corner case in pagination request Signed-off-by: Raphael Silva * Handle corner cases for pagination of list rules Signed-off-by: Raphael Silva * Update documentation for list rules parameters Signed-off-by: Raphael Silva * Refactor comments Signed-off-by: Raphael Silva * Simplify pagination implementation * Eliminate need for extra structs to store pagination parameters Signed-off-by: Raphael Silva * Update docs/querying/api.md Co-authored-by: Julius Volz Signed-off-by: Raphael Philipe Mendes da Silva * Update web/api/v1/api.go Co-authored-by: Bartlomiej Plotka Signed-off-by: Raphael Philipe Mendes da Silva * Update comment describing the need for next token Signed-off-by: Raphael Silva --------- Signed-off-by: Yijie Qin Signed-off-by: Raphael Silva Signed-off-by: Raphael Philipe Mendes da Silva Co-authored-by: Raphael Silva Co-authored-by: Julius Volz Co-authored-by: Bartlomiej Plotka --- docs/querying/api.md | 2 + web/api/v1/api.go | 71 ++++++++++++- web/api/v1/api_test.go | 220 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 291 insertions(+), 2 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 1095171b2f..6b7ae0524b 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -764,6 +764,8 @@ URL query parameters: - `file[]=`: only return rules with the given filepath. 
If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done. - `exclude_alerts=`: only return rules, do not return active alerts. - `match[]=`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional. +- `group_limit=`: The `group_limit` parameter allows you to specify a limit for the number of rule groups that is returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process. +- `group_next_token`: the pagination token that was returned in previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also need to be present. If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned. 
```json $ curl http://localhost:9090/api/v1/rules diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 9fb01f5767..b37605f5d5 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -15,6 +15,8 @@ package v1 import ( "context" + "crypto/sha1" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -1371,7 +1373,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { // RuleDiscovery has info for all rules. type RuleDiscovery struct { - RuleGroups []*RuleGroup `json:"groups"` + RuleGroups []*RuleGroup `json:"groups"` + GroupNextToken string `json:"groupNextToken:omitempty"` } // RuleGroup has info for rules which are part of a group. @@ -1458,8 +1461,23 @@ func (api *API) rules(r *http.Request) apiFuncResult { return invalidParamError(err, "exclude_alerts") } + maxGroups, nextToken, parseErr := parseListRulesPaginationRequest(r) + if parseErr != nil { + return *parseErr + } + rgs := make([]*RuleGroup, 0, len(ruleGroups)) + + foundToken := false + for _, grp := range ruleGroups { + if maxGroups > 0 && nextToken != "" && !foundToken { + if nextToken != getRuleGroupNextToken(grp.File(), grp.Name()) { + continue + } + foundToken = true + } + if len(rgSet) > 0 { if _, ok := rgSet[grp.Name()]; !ok { continue @@ -1504,6 +1522,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { if !excludeAlerts { activeAlerts = rulesAlertsToAPIAlerts(rule.ActiveAlerts()) } + enrichedRule = AlertingRule{ State: rule.State().String(), Name: rule.Name(), @@ -1519,6 +1538,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { LastEvaluation: rule.GetEvaluationTimestamp(), Type: "alerting", } + case *rules.RecordingRule: if !returnRecording { break @@ -1545,9 +1565,20 @@ func (api *API) rules(r *http.Request) apiFuncResult { // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. if len(apiRuleGroup.Rules) > 0 { + if maxGroups > 0 && len(rgs) == int(maxGroups) { + // We've reached the capacity of our page plus one. 
That means that for sure there will be at least one + // rule group in a subsequent request. Therefore a next token is required. + res.GroupNextToken = getRuleGroupNextToken(grp.File(), grp.Name()) + break + } rgs = append(rgs, apiRuleGroup) } } + + if maxGroups > 0 && nextToken != "" && !foundToken { + return invalidParamError(fmt.Errorf("invalid group_next_token '%v'. were rule groups changed?", nextToken), "group_next_token") + } + res.RuleGroups = rgs return apiFuncResult{res, nil, nil, nil} } @@ -1566,6 +1597,44 @@ func parseExcludeAlerts(r *http.Request) (bool, error) { return excludeAlerts, nil } +func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncResult) { + var ( + parsedMaxGroups int64 = -1 + err error + ) + maxGroups := r.URL.Query().Get("group_limit") + nextToken := r.URL.Query().Get("group_next_token") + + if nextToken != "" && maxGroups == "" { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token") + return -1, "", &errResult + } + + if maxGroups != "" { + parsedMaxGroups, err = strconv.ParseInt(maxGroups, 10, 32) + if err != nil { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be a valid number: %w", err), "group_limit") + return -1, "", &errResult + } + if parsedMaxGroups <= 0 { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit") + return -1, "", &errResult + } + } + + if parsedMaxGroups > 0 { + return parsedMaxGroups, nextToken, nil + } + + return -1, "", nil +} + +func getRuleGroupNextToken(file, group string) string { + h := sha1.New() + h.Write([]byte(file + ";" + group)) + return hex.EncodeToString(h.Sum(nil)) +} + type prometheusConfig struct { YAML string `json:"yaml"` } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 7ac2fe5693..35ad4a9ad3 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -338,7 +338,15 @@ func (m 
*rulesRetrieverMock) CreateRuleGroups() { ShouldRestore: false, Opts: opts, }) - m.ruleGroups = []*rules.Group{group} + group2 := rules.NewGroup(rules.GroupOptions{ + Name: "grp2", + File: "/path/to/file", + Interval: time.Second, + Rules: []rules.Rule{r[0]}, + ShouldRestore: false, + Opts: opts, + }) + m.ruleGroups = []*rules.Group{group, group2} } func (m *rulesRetrieverMock) AlertingRules() []*rules.AlertingRule { @@ -2241,6 +2249,25 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + { + Name: "grp2", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric3", + Query: "absent(test_metric3) != 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + }, + }, }, }, zeroFunc: rulesZeroFunc, @@ -2329,6 +2356,25 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + { + Name: "grp2", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric3", + Query: "absent(test_metric3) != 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: nil, + Health: "ok", + Type: "alerting", + }, + }, + }, }, }, zeroFunc: rulesZeroFunc, @@ -2410,6 +2456,25 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + { + Name: "grp2", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric3", + Query: "absent(test_metric3) != 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + }, + }, }, }, zeroFunc: rulesZeroFunc, @@ -2681,6 +2746,159 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, zeroFunc: rulesZeroFunc, }, + { + 
endpoint: api.rules, + query: url.Values{ + "group_limit": []string{"1"}, + }, + response: &RuleDiscovery{ + GroupNextToken: getRuleGroupNextToken("/path/to/file", "grp2"), + RuleGroups: []*RuleGroup{ + { + Name: "grp", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric3", + Query: "absent(test_metric3) != 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + AlertingRule{ + State: "inactive", + Name: "test_metric4", + Query: "up == 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + AlertingRule{ + State: "pending", + Name: "test_metric5", + Query: "vector(1)", + Duration: 1, + Labels: labels.FromStrings("name", "tm5"), + Annotations: labels.Labels{}, + Alerts: []*Alert{ + { + Labels: labels.FromStrings("alertname", "test_metric5", "name", "tm5"), + Annotations: labels.Labels{}, + State: "pending", + Value: "1e+00", + }, + }, + Health: "ok", + Type: "alerting", + }, + AlertingRule{ + State: "inactive", + Name: "test_metric6", + Query: "up == 1", + Duration: 1, + Labels: labels.FromStrings("testlabel", "rule"), + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + AlertingRule{ + State: "inactive", + Name: "test_metric7", + Query: "up == 1", + Duration: 1, + Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"), + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + RecordingRule{ + Name: "recording-rule-1", + Query: "vector(1)", + Labels: labels.Labels{}, + Health: "ok", + Type: "recording", + }, + RecordingRule{ + Name: "recording-rule-2", + Query: "vector(1)", + Labels: labels.FromStrings("testlabel", "rule"), + Health: "ok", + Type: "recording", + }, + }, + }, + }, + }, + zeroFunc: rulesZeroFunc, + }, + { + 
endpoint: api.rules, + query: url.Values{ + "group_limit": []string{"1"}, + "group_next_token": []string{getRuleGroupNextToken("/path/to/file", "grp2")}, + }, + response: &RuleDiscovery{ + RuleGroups: []*RuleGroup{ + { + Name: "grp2", + File: "/path/to/file", + Interval: 1, + Limit: 0, + Rules: []Rule{ + AlertingRule{ + State: "inactive", + Name: "test_metric3", + Query: "absent(test_metric3) != 1", + Duration: 1, + Labels: labels.Labels{}, + Annotations: labels.Labels{}, + Alerts: []*Alert{}, + Health: "ok", + Type: "alerting", + }, + }, + }, + }, + }, + zeroFunc: rulesZeroFunc, + }, + { // invalid pagination request + endpoint: api.rules, + query: url.Values{ + "group_next_token": []string{getRuleGroupNextToken("/path/to/file", "grp2")}, + }, + errType: errorBadData, + zeroFunc: rulesZeroFunc, + }, + { // invalid group_limit + endpoint: api.rules, + query: url.Values{ + "group_limit": []string{"0"}, + "group_next_token": []string{getRuleGroupNextToken("/path/to/file", "grp2")}, + }, + errType: errorBadData, + zeroFunc: rulesZeroFunc, + }, + { // Pagination token is invalid due to changes in the rule groups + endpoint: api.rules, + query: url.Values{ + "group_limit": []string{"1"}, + "group_next_token": []string{getRuleGroupNextToken("/removed/file", "notfound")}, + }, + errType: errorBadData, + zeroFunc: rulesZeroFunc, + }, { endpoint: api.queryExemplars, query: url.Values{ From bb27c6b8966efbf3213f7fad787e4efed3b1c53d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 22 Oct 2024 09:31:02 +0100 Subject: [PATCH 0803/1397] Create release 2.55.0 Signed-off-by: Bryan Boreham --- CHANGELOG.md | 7 ++----- VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 16 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a2b7bf9f8..dd5d4bd211 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,13 +2,9 @@ ## unreleased -## 2.55.0-rc.1 / 2024-10-16 +## 2.55.0 / 2024-10-22 * [FEATURE] PromQL: Add experimental `info` function. #14495 -* [BUGFIX] PromQL: make sort_by_label stable. #14985 - -## 2.55.0-rc.0 / 2024-09-20 - * [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727 * [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817 * [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815 @@ -31,6 +27,7 @@ * [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 * [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120 * [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 +* [BUGFIX] PromQL: make sort_by_label stable. #14985 * [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 * [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 * [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. 
#14810 diff --git a/VERSION b/VERSION index 2e8119e64e..c2576f1624 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.55.0-rc.1 +2.55.0 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index c3ec6a343d..888a4c5e5c 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.55.0-rc.1", + "version": "0.55.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0-rc.1", + "@prometheus-io/lezer-promql": "0.55.0", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index f9306adcb9..b234426dd6 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.55.0-rc.1", + "version": "0.55.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 381458f1ba..c62896bc3f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.55.0-rc.1", + "version": "0.55.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.55.0-rc.1", + "version": "0.55.0", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.55.0-rc.1", + "version": "0.55.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0-rc.1", + 
"@prometheus-io/lezer-promql": "0.55.0", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.55.0-rc.1", + "version": "0.55.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", @@ -19352,7 +19352,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.55.0-rc.1", + "version": "0.55.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -19370,7 +19370,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.55.0-rc.1", + "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", diff --git a/web/ui/package.json b/web/ui/package.json index 9371d795e0..135b793ba2 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, - "version": "0.55.0-rc.1" + "version": "0.55.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d91adb7edc..79cb86b31f 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.55.0-rc.1", + "version": "0.55.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.55.0-rc.1", + "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From eb523a6b29469d2753f56e9a92611a19161d1bc2 Mon Sep 17 00:00:00 2001 From: machine424 Date: Wed, 25 Sep 2024 20:02:52 +0200 Subject: [PATCH 0804/1397] fix(storage/mergeQuerier): add a reproducer for data race that occurs when one of the queriers alters the passed matchers and propose a fix Signed-off-by: machine424 
--- storage/merge.go | 8 ++++++++ tsdb/querier_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/storage/merge.go b/storage/merge.go index 2424b26ab7..b6980fb2f9 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -153,13 +153,21 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints ) // Schedule all Selects for all queriers we know about. for _, querier := range q.queriers { + // copy the matchers as some queriers may alter the slice. + // See https://github.com/prometheus/prometheus/issues/14723 + // matchersCopy := make([]*labels.Matcher, len(matchers)) + // copy(matchersCopy, matchers) + wg.Add(1) go func(qr genericQuerier) { + // go func(qr genericQuerier, m []*labels.Matcher) { defer wg.Done() // We need to sort for NewMergeSeriesSet to work. + // seriesSetChan <- qr.Select(ctx, true, hints, m...) seriesSetChan <- qr.Select(ctx, true, hints, matchers...) }(querier) + // }(querier, matchersCopy) } go func() { wg.Wait() diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 77772937a7..c52d6fed9e 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3787,3 +3787,29 @@ func (m mockReaderOfLabels) Series(storage.SeriesRef, *labels.ScratchBuilder, *[ func (m mockReaderOfLabels) Symbols() index.StringIter { panic("Series called") } + +// TestMergeQuerierConcurrentSelectMatchers reproduces the data race bug from +// https://github.com/prometheus/prometheus/issues/14723, when one of the queriers (blockQuerier in this case) +// alters the passed matchers. +func TestMergeQuerierConcurrentSelectMatchers(t *testing.T) { + block, err := OpenBlock(nil, createBlock(t, t.TempDir(), genSeries(1, 1, 0, 1)), nil) + require.NoError(t, err) + p, err := NewBlockQuerier(block, 0, 1) + require.NoError(t, err) + + // A secondary querier is required to enable concurrent select; a blockQuerier is used for simplicity. 
+ s, err := NewBlockQuerier(block, 0, 1) + require.NoError(t, err) + + originalMatchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, "baz", ".*"), + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + } + matchers := append([]*labels.Matcher{}, originalMatchers...) + + mergedQuerier := storage.NewMergeQuerier([]storage.Querier{p}, []storage.Querier{s}, storage.ChainedSeriesMerge) + defer mergedQuerier.Close() + mergedQuerier.Select(context.Background(), false, nil, matchers...) + + require.Equal(t, originalMatchers, matchers) +} From cebcdce78a7412c8821e9b1e794f0c2b5e714043 Mon Sep 17 00:00:00 2001 From: machine424 Date: Fri, 27 Sep 2024 16:03:50 +0200 Subject: [PATCH 0805/1397] fix(storage/mergeQuerier): copy the matcjers slice before passing it to queriers as some of them may alter it. Signed-off-by: machine424 --- storage/merge.go | 13 +++++-------- tsdb/querier_test.go | 8 +++++++- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/storage/merge.go b/storage/merge.go index b6980fb2f9..a4d0934b16 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -155,19 +155,16 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints for _, querier := range q.queriers { // copy the matchers as some queriers may alter the slice. // See https://github.com/prometheus/prometheus/issues/14723 - // matchersCopy := make([]*labels.Matcher, len(matchers)) - // copy(matchersCopy, matchers) + matchersCopy := make([]*labels.Matcher, len(matchers)) + copy(matchersCopy, matchers) wg.Add(1) - go func(qr genericQuerier) { - // go func(qr genericQuerier, m []*labels.Matcher) { + go func(qr genericQuerier, m []*labels.Matcher) { defer wg.Done() // We need to sort for NewMergeSeriesSet to work. - // seriesSetChan <- qr.Select(ctx, true, hints, m...) - seriesSetChan <- qr.Select(ctx, true, hints, matchers...) - }(querier) - // }(querier, matchersCopy) + seriesSetChan <- qr.Select(ctx, true, hints, m...) 
+ }(querier, matchersCopy) } go func() { wg.Wait() diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index c52d6fed9e..aca6c845b1 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3794,6 +3794,9 @@ func (m mockReaderOfLabels) Symbols() index.StringIter { func TestMergeQuerierConcurrentSelectMatchers(t *testing.T) { block, err := OpenBlock(nil, createBlock(t, t.TempDir(), genSeries(1, 1, 0, 1)), nil) require.NoError(t, err) + defer func() { + require.NoError(t, block.Close()) + }() p, err := NewBlockQuerier(block, 0, 1) require.NoError(t, err) @@ -3808,7 +3811,10 @@ func TestMergeQuerierConcurrentSelectMatchers(t *testing.T) { matchers := append([]*labels.Matcher{}, originalMatchers...) mergedQuerier := storage.NewMergeQuerier([]storage.Querier{p}, []storage.Querier{s}, storage.ChainedSeriesMerge) - defer mergedQuerier.Close() + defer func() { + require.NoError(t, mergedQuerier.Close()) + }() + mergedQuerier.Select(context.Background(), false, nil, matchers...) require.Equal(t, originalMatchers, matchers) From 3afcda82befc41b2a7834069593c2a6dc24a6e2c Mon Sep 17 00:00:00 2001 From: alexgreenbank Date: Tue, 22 Oct 2024 14:19:01 +0100 Subject: [PATCH 0806/1397] docs: add keep_firing_for in alerting rules Signed-off-by: alexgreenbank --- docs/configuration/alerting_rules.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md index 4d7c75e495..cd33dba8e3 100644 --- a/docs/configuration/alerting_rules.md +++ b/docs/configuration/alerting_rules.md @@ -27,6 +27,7 @@ groups: - alert: HighRequestLatency expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 for: 10m + keep_firing_for: 5m labels: severity: page annotations: @@ -40,6 +41,13 @@ the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state. 
Alerting rules without the `for` clause will become active on the first evaluation. +There is also an optional `keep_firing_for` clause that tells Prometheus to keep +this alert firing for the specified duration after the firing condition was last met. +This can be used to prevent situations such as flapping alerts, false resolutions +due to lack of data loss, etc. Alerting rules without the `keep_firing_for` clause +will deactivate on the first evaluation where the condition is not met (assuming +any optional `for` duration desribed above has been satisfied). + The `labels` clause allows specifying a set of additional labels to be attached to the alert. Any existing conflicting labels will be overwritten. The label values can be templated. From 1b4e7f74e6e14832090edf1cce9690bcfbc7b95c Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 22 Oct 2024 15:24:36 +0200 Subject: [PATCH 0807/1397] feat(tools): add debug printouts to rules unit testing (#15196) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * promtool: Add debug flag for rule tests This makes it print out the tsdb state (both input_series and rules that are run) at the end of a test, making reasoning about tests much easier. 
Signed-off-by: David Leadbeater * Reuse generated test name from junit testing Signed-off-by: György Krajcsovits --------- Signed-off-by: David Leadbeater Signed-off-by: György Krajcsovits Co-authored-by: David Leadbeater --- cmd/promtool/main.go | 2 ++ cmd/promtool/unittest.go | 47 +++++++++++++++++++++++++++++------ cmd/promtool/unittest_test.go | 6 ++--- docs/command-line/promtool.md | 1 + 4 files changed, 46 insertions(+), 10 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 26618855c7..49676ee5c4 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -217,6 +217,7 @@ func main() { "test-rule-file", "The unit test file.", ).Required().ExistingFiles() + testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool() testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool() defaultDBPath := "data/" @@ -392,6 +393,7 @@ func main() { }, *testRulesRun, *testRulesDiff, + *testRulesDebug, *testRulesFiles...), ) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 667e748061..78dacdc569 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -46,11 +46,11 @@ import ( // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. -func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { - return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...) +func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int { + return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...) 
} -func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { +func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int { failed := false junit := &junitxml.JUnitXML{} @@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, } for _, f := range files { - if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, return successExitCode } -func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error { +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error { b, err := os.ReadFile(filename) if err != nil { ts.Abort(err) @@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } - ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...) + ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, unitTestInp.RuleFiles...) if ers != nil { for _, e := range ers { tc.Fail(e.Error()) @@ -198,7 +198,14 @@ type testGroup struct { } // test performs the unit tests. 
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) { +func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) { + if debug { + testStart := time.Now() + fmt.Printf("DEBUG: Starting test %s\n", testname) + defer func() { + fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart)) + }() + } // Setup testing suite. suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts) if err != nil { @@ -482,6 +489,32 @@ Outer: } } + if debug { + ts := tg.maxEvalTime() + // Potentially a test can be specified at a time with fractional seconds, + // which PromQL cannot represent, so round up to the next whole second. + ts = (ts + time.Second).Truncate(time.Second) + expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts) + q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts)) + if err != nil { + fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err) + return errs + } + res := q.Exec(suite.Context()) + if res.Err != nil { + fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err) + return errs + } + switch v := res.Value.(type) { + case promql.Matrix: + fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts) + fmt.Println(v.String()) + default: + fmt.Printf("DEBUG: Got unexpected type %T\n", v) + return errs + } + } + if len(errs) > 0 { return errs } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 9bbac28e9f..9b73dcdc1c 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -141,14 +141,14 @@ func TestRulesUnitTest(t *testing.T) { reuseCount[tt.want] += len(tt.args.files) } t.Run(tt.name, func(t *testing.T) { - if got := 
RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want { + if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) } t.Run("Junit xml output ", func(t *testing.T) { var buf bytes.Buffer - if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 { + if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 { t.Errorf("RulesUnitTestResults() = %v, want 1", got) } var test junitxml.JUnitXML @@ -230,7 +230,7 @@ func TestRulesUnitTestRun(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...) + got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...) require.Equal(t, tt.want, got) }) } diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 996a996555..5e2a8f6bb1 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -462,6 +462,7 @@ Unit tests for rules. | Flag | Description | Default | | --- | --- | --- | | --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --debug | Enable unit test debugging. | `false` | | --diff | [Experimental] Print colored differential output between expected & received output. 
| `false` | From aa81210c8b90cff2c3e72d1e9a3128115889b276 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 22 Oct 2024 18:49:25 +0200 Subject: [PATCH 0808/1397] NHCB scrape: refactor state handling and speed up scrape test (#15193) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * NHCB: scrape use state field and not booleans From comment https://github.com/prometheus/prometheus/pull/14978#discussion_r1800898724 Also make compareLabels read only and move storeLabels to the first processed classic histogram series. Signed-off-by: György Krajcsovits * Speed up TestConvertClassicHistogramsToNHCB 3x Reduce the startup time and timeouts Signed-off-by: György Krajcsovits * lint fix Signed-off-by: György Krajcsovits --------- Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse.go | 69 ++++++++++++++++++------------------ scrape/scrape_test.go | 6 ++-- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index 7c2db69906..22384f1ec8 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -28,6 +28,14 @@ import ( "github.com/prometheus/prometheus/util/convertnhcb" ) +type collectionState int + +const ( + stateStart collectionState = iota + stateCollecting + stateEmitting +) + // The NHCBParser wraps a Parser and converts classic histograms to native // histograms with custom buckets. // @@ -48,6 +56,9 @@ type NHCBParser struct { // Labels builder. builder labels.ScratchBuilder + // State of the parser. + state collectionState + // Caches the values from the underlying parser. // For Series and Histogram. bytes []byte @@ -64,9 +75,9 @@ type NHCBParser struct { // Caches the entry itself if we are inserting a converted NHCB // halfway through. - entry Entry - err error - justInsertedNHCB bool + entry Entry + err error + // Caches the values and metric for the inserted converted NHCB. 
bytesNHCB []byte hNHCB *histogram.Histogram @@ -77,11 +88,10 @@ type NHCBParser struct { // Collates values from the classic histogram series to build // the converted histogram later. - tempLsetNHCB labels.Labels - tempNHCB convertnhcb.TempHistogram - tempExemplars []exemplar.Exemplar - tempExemplarCount int - isCollationInProgress bool + tempLsetNHCB labels.Labels + tempNHCB convertnhcb.TempHistogram + tempExemplars []exemplar.Exemplar + tempExemplarCount int // Remembers the last base histogram metric name (assuming it's // a classic histogram) so we can tell if the next float series @@ -105,7 +115,7 @@ func (p *NHCBParser) Series() ([]byte, *int64, float64) { } func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { - if p.justInsertedNHCB { + if p.state == stateEmitting { return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB } return p.bytes, p.ts, p.h, p.fh @@ -128,7 +138,7 @@ func (p *NHCBParser) Comment() []byte { } func (p *NHCBParser) Metric(l *labels.Labels) string { - if p.justInsertedNHCB { + if p.state == stateEmitting { *l = p.lsetNHCB return p.metricStringNHCB } @@ -137,7 +147,7 @@ func (p *NHCBParser) Metric(l *labels.Labels) string { } func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { - if p.justInsertedNHCB { + if p.state == stateEmitting { if len(p.exemplars) == 0 { return false } @@ -153,8 +163,8 @@ func (p *NHCBParser) CreatedTimestamp() *int64 { } func (p *NHCBParser) Next() (Entry, error) { - if p.justInsertedNHCB { - p.justInsertedNHCB = false + if p.state == stateEmitting { + p.state = stateStart if p.entry == EntrySeries { isNHCB := p.handleClassicHistogramSeries(p.lset) if isNHCB && !p.keepClassicHistograms { @@ -202,34 +212,21 @@ func (p *NHCBParser) Next() (Entry, error) { } // Return true if labels have changed and we should emit the NHCB. -// Update the stored labels if the labels have changed. func (p *NHCBParser) compareLabels() bool { - // Collection not in progress. 
- if p.lastHistogramName == "" { - if p.typ == model.MetricTypeHistogram { - p.storeBaseLabels() - } + if p.state != stateCollecting { return false } if p.typ != model.MetricTypeHistogram { - // Different metric type, emit the NHCB. - p.lastHistogramName = "" + // Different metric type. return true } - if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) { // Different metric name. - p.storeBaseLabels() return true } nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) - if p.lastHistogramLabelsHash != nextHash { - // Different label values. - p.storeBaseLabels() - return true - } - - return false + // Different label values. + return p.lastHistogramLabelsHash != nextHash } // Save the label set of the classic histogram without suffix and bucket `le` label. @@ -275,7 +272,10 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { } func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { - p.isCollationInProgress = true + if p.state != stateCollecting { + p.storeBaseLabels() + } + p.state = stateCollecting p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) p.storeExemplars() updateHist(&p.tempNHCB) @@ -308,9 +308,9 @@ func (p *NHCBParser) swapExemplars() { } // processNHCB converts the collated classic histogram series to NHCB and caches the info -// to be returned to callers. +// to be returned to callers. Retruns true if the conversion was successful. 
func (p *NHCBParser) processNHCB() bool { - if !p.isCollationInProgress { + if p.state != stateCollecting { return false } ub := make([]float64, 0, len(p.tempNHCB.BucketCounts)) @@ -338,7 +338,6 @@ func (p *NHCBParser) processNHCB() bool { p.lsetNHCB = p.tempLsetNHCB p.swapExemplars() p.tempNHCB = convertnhcb.NewTempHistogram() - p.isCollationInProgress = false - p.justInsertedNHCB = true + p.state = stateEmitting return true } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 9a70d74117..da964a2308 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -3891,8 +3891,8 @@ metric: < JobName: "test", SampleLimit: 100, Scheme: "http", - ScrapeInterval: model.Duration(100 * time.Millisecond), - ScrapeTimeout: model.Duration(100 * time.Millisecond), + ScrapeInterval: model.Duration(50 * time.Millisecond), + ScrapeTimeout: model.Duration(25 * time.Millisecond), AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms, ConvertClassicHistogramsToNHCB: tc.convertClassicHistToNHCB, } @@ -3931,7 +3931,7 @@ metric: < })) defer ts.Close() - sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{EnableNativeHistogramsIngestion: true}, newTestScrapeMetrics(t)) + sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, newTestScrapeMetrics(t)) require.NoError(t, err) defer sp.stop() From cccbe72514e4f5a86b490f9a45edecb401f091fe Mon Sep 17 00:00:00 2001 From: Vanshika <102902652+Vanshikav123@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:04:28 +0530 Subject: [PATCH 0809/1397] TSDB: Fix some edge cases when OOO is enabled (#14710) Fix some edge cases when OOO is enabled Signed-off-by: Vanshikav123 Signed-off-by: Vanshika <102902652+Vanshikav123@users.noreply.github.com> Signed-off-by: Jesus Vazquez Co-authored-by: Jesus Vazquez --- CHANGELOG.md | 92 +++++++++++++- cmd/prometheus/main.go | 3 + 
rules/fixtures/rules1.yaml | 5 + rules/group.go | 4 + rules/manager_test.go | 47 ++++++++ scrape/helpers_test.go | 4 + scrape/scrape.go | 4 +- scrape/scrape_test.go | 173 ++++++++++++++++++++++++++- storage/fanout.go | 10 ++ storage/interface.go | 8 ++ storage/remote/write.go | 5 + storage/remote/write_handler_test.go | 4 + tsdb/agent/db.go | 5 + tsdb/head_append.go | 20 +++- util/teststorage/storage.go | 14 ++- 15 files changed, 388 insertions(+), 10 deletions(-) create mode 100644 rules/fixtures/rules1.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index f1321829e3..72d9f7a110 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,8 +3,9 @@ ## unreleased * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136 -* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 -* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 +* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 +- [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 +- [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 ## 3.0.0-beta.1 / 2024-10-09 @@ -20,7 +21,6 @@ * [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677 * [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546 * [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909 -* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 * [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929 * [ENHANCEMENT] Consul SD: Support catalog filters. #11224 * [PERF] TSDB: Parallelize deletion of postings after head compaction. 
#14975 @@ -41,6 +41,10 @@ Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 supp As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs. +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> b10c3696c (Revert "updated changelog") * [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872 * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 @@ -52,6 +56,7 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +<<<<<<< HEAD * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 * [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 @@ -85,6 +90,87 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. 
#14716 * [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821 * [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 +======= +- [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872 +- [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904 +- [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 +- [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 +- [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705 +- [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807 +- [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770 +- [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. 
To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 +- [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 +- [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 +- [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +- [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 + +## 2.55.0-rc.0 / 2024-09-20 + +- [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727 +- [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 +- [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817 +- [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815 +- [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734 +- [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200 +- [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346 +- [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403 +- [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506 +- [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706 +- [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 +- [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 +- [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450 +- [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. 
This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 +- [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655 +- [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621 +- [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413 +- [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816 +- [ENHANCEMENT] API: Support multiple listening addresses. #14665 +- [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 +- [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948 +- [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 +- [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 +- [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 +- [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810 +- [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766 +- [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716 +- [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821 +- [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 +>>>>>>> 58173ab1e (updated changelog) +======= +* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 + +## 2.55.0-rc.0 / 2024-09-20 + +* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727 +* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. 
#14769 +* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817 +* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815 +* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734 +* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200 +* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346 +* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403 +* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506 +* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706 +* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 +* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 +* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450 +* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 +* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655 +* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621 +* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413 +* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816 +* [ENHANCEMENT] API: Support multiple listening addresses. #14665 +* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 +* [PERF] TSDB: Query in-order and out-of-order series together. 
#14354, #14693, #14714, #14831, #14874, #14948 +* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 +* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 +* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 +* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810 +* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766 +* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716 +* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821 +* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 +>>>>>>> b10c3696c (Revert "updated changelog") ## 2.54.1 / 2024-08-27 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 4a70d63bf8..045389770e 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1639,6 +1639,9 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender { type notReadyAppender struct{} +// SetOptions does nothing in this appender implementation. +func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {} + func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } diff --git a/rules/fixtures/rules1.yaml b/rules/fixtures/rules1.yaml new file mode 100644 index 0000000000..76fbf71f3b --- /dev/null +++ b/rules/fixtures/rules1.yaml @@ -0,0 +1,5 @@ +groups: + - name: test_1 + rules: + - record: test_2 + expr: vector(2) diff --git a/rules/group.go b/rules/group.go index e9ef2be3ad..7dd046b57a 100644 --- a/rules/group.go +++ b/rules/group.go @@ -75,6 +75,7 @@ type Group struct { // concurrencyController controls the rules evaluation concurrency. 
concurrencyController RuleConcurrencyController + appOpts *storage.AppendOptions } // GroupEvalIterationFunc is used to implement and extend rule group @@ -145,6 +146,7 @@ func NewGroup(o GroupOptions) *Group { metrics: metrics, evalIterationFunc: evalIterationFunc, concurrencyController: concurrencyController, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, } } @@ -564,6 +566,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if s.H != nil { _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) } else { + app.SetOptions(g.appOpts) _, err = app.Append(0, s.Metric, s.T, s.F) } @@ -660,6 +663,7 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { return } app := g.opts.Appendable.Appender(ctx) + app.SetOptions(g.appOpts) queryOffset := g.QueryOffset() for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. diff --git a/rules/manager_test.go b/rules/manager_test.go index 198d6bd077..6afac993d8 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1195,6 +1195,53 @@ func countStaleNaN(t *testing.T, st storage.Storage) int { return c } +func TestRuleMovedBetweenGroups(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + storage := teststorage.New(t, 600000) + defer storage.Close() + opts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10, + Timeout: 10 * time.Second, + } + engine := promql.NewEngine(opts) + ruleManager := NewManager(&ManagerOptions{ + Appendable: storage, + Queryable: storage, + QueryFunc: EngineQueryFunc(engine, storage), + Context: context.Background(), + Logger: promslog.NewNopLogger(), + }) + var stopped bool + ruleManager.start() + defer func() { + if !stopped { + ruleManager.Stop() + } + }() + + rule2 := "fixtures/rules2.yaml" + rule1 := "fixtures/rules1.yaml" + + // Load initial configuration of rules2 + require.NoError(t, ruleManager.Update(1*time.Second, []string{rule2}, 
labels.EmptyLabels(), "", nil)) + + // Wait for rule to be evaluated + time.Sleep(3 * time.Second) + + // Reload configuration of rules1 + require.NoError(t, ruleManager.Update(1*time.Second, []string{rule1}, labels.EmptyLabels(), "", nil)) + + // Wait for rule to be evaluated in new location and potential staleness marker + time.Sleep(3 * time.Second) + + require.Equal(t, 0, countStaleNaN(t, storage)) // Not expecting any stale markers. +} + func TestGroupHasAlertingRules(t *testing.T) { tests := []struct { group *Group diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 4f7918f79e..12a56d7071 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -43,6 +43,8 @@ func (a nopAppendable) Appender(_ context.Context) storage.Appender { type nopAppender struct{} +func (a nopAppender) SetOptions(opts *storage.AppendOptions) {} + func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { return 0, nil } @@ -114,6 +116,8 @@ type collectResultAppender struct { pendingMetadata []metadata.Metadata } +func (a *collectResultAppender) SetOptions(opts *storage.AppendOptions) {} + func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() diff --git a/scrape/scrape.go b/scrape/scrape.go index f5f02d245f..7e270bb3a3 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1864,7 +1864,9 @@ loop: if err == nil { sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. 
+ app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) + app.SetOptions(nil) switch { case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if a target @@ -1970,7 +1972,7 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { ts := timestamp.FromTime(start) - + app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) stale := math.Float64frombits(value.StaleNaN) b := labels.NewBuilder(labels.EmptyLabels()) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index da964a2308..f75e1db89a 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -86,6 +86,97 @@ func TestNewScrapePool(t *testing.T) { require.NotNil(t, sp.newLoop, "newLoop function not initialized.") } +func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) { + // Test with default OutOfOrderTimeWindow (0) + t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) { + s := teststorage.New(t) + defer s.Close() + + runScrapeLoopTest(t, s, false) + }) + + // Test with specific OutOfOrderTimeWindow (600000) + t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) { + s := teststorage.New(t, 600000) + defer s.Close() + + runScrapeLoopTest(t, s, true) + }) +} + +func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrder bool) { + // Create an appender for adding samples to the storage. + app := s.Appender(context.Background()) + capp := &collectResultAppender{next: app} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + + // Current time for generating timestamps. + now := time.Now() + + // Calculate timestamps for the samples based on the current time. 
+ now = now.Truncate(time.Minute) // round down the now timestamp to the nearest minute + timestampInorder1 := now + timestampOutOfOrder := now.Add(-5 * time.Minute) + timestampInorder2 := now.Add(5 * time.Minute) + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", timestampInorder1) + require.NoError(t, err) + + _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "", timestampOutOfOrder) + require.NoError(t, err) + + _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 3`), "", timestampInorder2) + require.NoError(t, err) + + require.NoError(t, slApp.Commit()) + + // Query the samples back from the storage. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano()) + require.NoError(t, err) + defer q.Close() + + // Use a matcher to filter the metric name. + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a")) + + var results []floatSample + for series.Next() { + it := series.At().Iterator(nil) + for it.Next() == chunkenc.ValFloat { + t, v := it.At() + results = append(results, floatSample{ + metric: series.At().Labels(), + t: t, + f: v, + }) + } + require.NoError(t, it.Err()) + } + require.NoError(t, series.Err()) + + // Define the expected results + want := []floatSample{ + { + metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + t: timestamp.FromTime(timestampInorder1), + f: 1, + }, + { + metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + t: timestamp.FromTime(timestampInorder2), + f: 3, + }, + } + + if expectOutOfOrder { + require.NotEqual(t, want, results, "Expected results to include out-of-order sample:\n%s", results) + } else { + require.Equal(t, want, results, "Appended samples not as expected:\n%s", results) + } +} + func TestDroppedTargetsList(t *testing.T) { var ( app = &nopAppendable{} 
@@ -1157,6 +1248,87 @@ func BenchmarkScrapeLoopAppendOM(b *testing.B) { } } +func TestSetOptionsHandlingStaleness(t *testing.T) { + s := teststorage.New(t, 600000) + defer s.Close() + + signal := make(chan struct{}, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Function to run the scrape loop + runScrapeLoop := func(ctx context.Context, t *testing.T, cue int, action func(*scrapeLoop)) { + var ( + scraper = &testScraper{} + app = func(ctx context.Context) storage.Appender { + return s.Appender(ctx) + } + ) + sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + numScrapes := 0 + scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + numScrapes++ + if numScrapes == cue { + action(sl) + } + w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes))) + return nil + } + sl.run(nil) + } + go func() { + runScrapeLoop(ctx, t, 2, func(sl *scrapeLoop) { + go sl.stop() + // Wait a bit then start a new target. + time.Sleep(100 * time.Millisecond) + go func() { + runScrapeLoop(ctx, t, 4, func(_ *scrapeLoop) { + cancel() + }) + signal <- struct{}{} + }() + }) + }() + + select { + case <-signal: + case <-time.After(10 * time.Second): + t.Fatalf("Scrape wasn't stopped.") + } + + ctx1, cancel := context.WithCancel(context.Background()) + defer cancel() + + q, err := s.Querier(0, time.Now().UnixNano()) + + require.NoError(t, err) + defer q.Close() + + series := q.Select(ctx1, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a")) + + var results []floatSample + for series.Next() { + it := series.At().Iterator(nil) + for it.Next() == chunkenc.ValFloat { + t, v := it.At() + results = append(results, floatSample{ + metric: series.At().Labels(), + t: t, + f: v, + }) + } + require.NoError(t, it.Err()) + } + require.NoError(t, series.Err()) + var c int + for _, s := range results { + if value.IsStaleNaN(s.f) { + c++ + } + } + require.Equal(t, 0, c, "invalid count of staleness 
markers after stopping the engine") +} + func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { appender := &collectResultAppender{} var ( @@ -4032,7 +4204,6 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * case <-time.After(5 * time.Second): t.Fatalf("Scrape wasn't stopped.") } - // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) diff --git a/storage/fanout.go b/storage/fanout.go index 6ff5178955..4d076788a7 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -147,6 +147,16 @@ type fanoutAppender struct { secondaries []Appender } +// SetOptions propagates the hints to both primary and secondary appenders. +func (f *fanoutAppender) SetOptions(opts *AppendOptions) { + if f.primary != nil { + f.primary.SetOptions(opts) + } + for _, appender := range f.secondaries { + appender.SetOptions(opts) + } +} + func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { ref, err := f.primary.Append(ref, l, t, v) if err != nil { diff --git a/storage/interface.go b/storage/interface.go index b7ef14ce96..56bb53dfe0 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -243,6 +243,10 @@ func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) { return f(mint, maxt) } +type AppendOptions struct { + DiscardOutOfOrder bool +} + // Appender provides batched appends against a storage. // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // @@ -271,6 +275,10 @@ type Appender interface { // Appender has to be discarded after rollback. Rollback() error + // SetOptions configures the appender with specific append options such as + // discarding out-of-order samples even if out-of-order is enabled in the TSDB. 
+ SetOptions(opts *AppendOptions) + ExemplarAppender HistogramAppender MetadataUpdater diff --git a/storage/remote/write.go b/storage/remote/write.go index 20e4ed10d1..00e4fa3a0c 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -278,6 +278,7 @@ func (rws *WriteStorage) Close() error { type timestampTracker struct { writeStorage *WriteStorage + appendOptions *storage.AppendOptions samples int64 exemplars int64 histograms int64 @@ -285,6 +286,10 @@ type timestampTracker struct { highestRecvTimestamp *maxTimestamp } +func (t *timestampTracker) SetOptions(opts *storage.AppendOptions) { + t.appendOptions = opts +} + // Append implements storage.Appender. func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { t.samples++ diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index d91949131b..580c7c143e 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -833,6 +833,10 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender { return m } +func (m *mockAppendable) SetOptions(opts *storage.AppendOptions) { + panic("unimplemented") +} + func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if m.appendSampleErr != nil { return 0, m.appendSampleErr diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index b2c40b2017..5de84c93af 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -763,6 +763,7 @@ func (db *DB) Close() error { type appender struct { *DB + hints *storage.AppendOptions pendingSeries []record.RefSeries pendingSamples []record.RefSample @@ -783,6 +784,10 @@ type appender struct { floatHistogramSeries []*memSeries } +func (a *appender) SetOptions(opts *storage.AppendOptions) { + a.hints = opts +} + func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { // series 
references and chunk references are identical for agent mode. headRef := chunks.HeadSeriesRef(ref) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index adfd5d4bf7..170e740448 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -40,6 +40,12 @@ type initAppender struct { var _ storage.GetRef = &initAppender{} +func (a *initAppender) SetOptions(opts *storage.AppendOptions) { + if a.app != nil { + a.app.SetOptions(opts) + } +} + func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if a.app != nil { return a.app.Append(ref, lset, t, v) @@ -326,6 +332,11 @@ type headAppender struct { appendID, cleanupAppendIDsBelow uint64 closed bool + hints *storage.AppendOptions +} + +func (a *headAppender) SetOptions(opts *storage.AppendOptions) { + a.hints = opts } func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { @@ -359,13 +370,18 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } s.Lock() + + defer s.Unlock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. 
- _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) + isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err == nil { + if isOOO && a.hints != nil && a.hints.DiscardOutOfOrder { + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() + return 0, storage.ErrOutOfOrderSample + } s.pendingCommit = true } - s.Unlock() if delta > 0 { a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) } diff --git a/util/teststorage/storage.go b/util/teststorage/storage.go index 7d1f9dda24..e15d591e0c 100644 --- a/util/teststorage/storage.go +++ b/util/teststorage/storage.go @@ -30,15 +30,15 @@ import ( // New returns a new TestStorage for testing purposes // that removes all associated files on closing. -func New(t testutil.T) *TestStorage { - stor, err := NewWithError() +func New(t testutil.T, outOfOrderTimeWindow ...int64) *TestStorage { + stor, err := NewWithError(outOfOrderTimeWindow...) require.NoError(t, err) return stor } // NewWithError returns a new TestStorage for user facing tests, which reports // errors directly. 
-func NewWithError() (*TestStorage, error) { +func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) { dir, err := os.MkdirTemp("", "test_storage") if err != nil { return nil, fmt.Errorf("opening test directory: %w", err) @@ -51,6 +51,14 @@ func NewWithError() (*TestStorage, error) { opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond) opts.RetentionDuration = 0 opts.EnableNativeHistograms = true + + // Set OutOfOrderTimeWindow if provided, otherwise use default (0) + if len(outOfOrderTimeWindow) > 0 { + opts.OutOfOrderTimeWindow = outOfOrderTimeWindow[0] + } else { + opts.OutOfOrderTimeWindow = 0 // Default value is zero + } + db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats()) if err != nil { return nil, fmt.Errorf("opening test storage: %w", err) From 2182b832711586f8d8a4c34f5820ea9265d818b6 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 24 Oct 2024 07:38:58 +0200 Subject: [PATCH 0810/1397] feat(nhcb): implement created timestamp handling (#15198) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Call through to the underlaying parser if we are not in a histogram and the entry is a series or exponential native histogram. Otherwise store and retrieve CT for NHCB. * fix(omparser): losing exemplars when CT is parsed Fixes: #15137 Ignore exemplars while peeking ahead during CT parsing. Simplify state reset with defer(). 
Signed-off-by: György Krajcsovits --- model/textparse/interface_test.go | 6 ++-- model/textparse/nhcbparse.go | 36 ++++++++++++++-------- model/textparse/nhcbparse_test.go | 46 ++++++++++++++++++----------- model/textparse/openmetricsparse.go | 44 +++++++++++++++++++-------- 4 files changed, 88 insertions(+), 44 deletions(-) diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index 6136fbc915..72c8284f2d 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -239,13 +239,13 @@ func testParse(t *testing.T, p Parser) (ret []parsedEntry) { } p.Metric(&got.lset) - for e := (exemplar.Exemplar{}); p.Exemplar(&e); { - got.es = append(got.es, e) - } // Parser reuses int pointer. if ct := p.CreatedTimestamp(); ct != nil { got.ct = int64p(*ct) } + for e := (exemplar.Exemplar{}); p.Exemplar(&e); { + got.es = append(got.es, e) + } case EntryType: m, got.typ = p.Type() got.m = string(m) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index 22384f1ec8..eab9fa7e6e 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -84,6 +84,7 @@ type NHCBParser struct { fhNHCB *histogram.FloatHistogram lsetNHCB labels.Labels exemplars []exemplar.Exemplar + ctNHCB *int64 metricStringNHCB string // Collates values from the classic histogram series to build @@ -92,6 +93,7 @@ type NHCBParser struct { tempNHCB convertnhcb.TempHistogram tempExemplars []exemplar.Exemplar tempExemplarCount int + tempCT *int64 // Remembers the last base histogram metric name (assuming it's // a classic histogram) so we can tell if the next float series @@ -159,6 +161,16 @@ func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { } func (p *NHCBParser) CreatedTimestamp() *int64 { + switch p.state { + case stateStart: + if p.entry == EntrySeries || p.entry == EntryHistogram { + return p.parser.CreatedTimestamp() + } + case stateCollecting: + return p.parser.CreatedTimestamp() + case stateEmitting: + 
return p.ctNHCB + } return nil } @@ -174,22 +186,20 @@ func (p *NHCBParser) Next() (Entry, error) { } return p.entry, p.err } - et, err := p.parser.Next() - if err != nil { - if errors.Is(err, io.EOF) && p.processNHCB() { - p.entry = et - p.err = err + + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { return EntryHistogram, nil } - return EntryInvalid, err + return EntryInvalid, p.err } - switch et { + switch p.entry { case EntrySeries: p.bytes, p.ts, p.value = p.parser.Series() p.metricString = p.parser.Metric(&p.lset) // Check the label set to see if we can continue or need to emit the NHCB. if p.compareLabels() && p.processNHCB() { - p.entry = et return EntryHistogram, nil } isNHCB := p.handleClassicHistogramSeries(p.lset) @@ -197,7 +207,7 @@ func (p *NHCBParser) Next() (Entry, error) { // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. return p.Next() } - return et, err + return p.entry, p.err case EntryHistogram: p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() p.metricString = p.parser.Metric(&p.lset) @@ -205,10 +215,9 @@ func (p *NHCBParser) Next() (Entry, error) { p.bName, p.typ = p.parser.Type() } if p.processNHCB() { - p.entry = et return EntryHistogram, nil } - return et, err + return p.entry, p.err } // Return true if labels have changed and we should emit the NHCB. 
@@ -274,8 +283,9 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { if p.state != stateCollecting { p.storeBaseLabels() + p.tempCT = p.parser.CreatedTimestamp() + p.state = stateCollecting } - p.state = stateCollecting p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) p.storeExemplars() updateHist(&p.tempNHCB) @@ -337,7 +347,9 @@ func (p *NHCBParser) processNHCB() bool { p.bytesNHCB = []byte(p.metricStringNHCB) p.lsetNHCB = p.tempLsetNHCB p.swapExemplars() + p.ctNHCB = p.tempCT p.tempNHCB = convertnhcb.NewTempHistogram() p.state = stateEmitting + p.tempCT = nil return true } diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 80b65fd225..1ead2e30e2 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -292,14 +292,14 @@ foobar{quantile="0.99"} 150.1` lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}}, - // TODO(krajorama): ct: int64p(1520872607123), + ct: int64p(1520872607123), }, { m: `foo_total{a="b"}`, v: 17.0, lset: labels.FromStrings("__name__", "foo_total", "a", "b"), t: int64p(1520879607789), es: []exemplar.Exemplar{{Labels: labels.FromStrings("id", "counter-test"), Value: 5}}, - // TODO(krajorama): ct: int64p(1520872607123), + ct: int64p(1520872607123), }, { m: "bar", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", @@ -310,22 +310,22 @@ foobar{quantile="0.99"} 150.1` m: "bar_count", v: 17.0, lset: labels.FromStrings("__name__", "bar_count"), - // TODO(krajorama): ct: int64p(1520872608124), + ct: int64p(1520872608124), }, { m: "bar_sum", v: 324789.3, lset: labels.FromStrings("__name__", "bar_sum"), - // TODO(krajorama): ct: int64p(1520872608124), + ct: 
int64p(1520872608124), }, { m: `bar{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), - // TODO(krajorama): ct: int64p(1520872608124), + ct: int64p(1520872608124), }, { m: `bar{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), - // TODO(krajorama): ct: int64p(1520872608124), + ct: int64p(1520872608124), }, { m: "baz", help: "Histogram with the same objective as above's summary", @@ -343,7 +343,7 @@ foobar{quantile="0.99"} 150.1` CustomValues: []float64{0.0}, // We do not store the +Inf boundary. }, lset: labels.FromStrings("__name__", "baz"), - // TODO(krajorama): ct: int64p(1520872609125), + ct: int64p(1520872609125), }, { m: "fizz_created", help: "Gauge which shouldn't be parsed as CT", @@ -371,7 +371,7 @@ foobar{quantile="0.99"} 150.1` CustomValues: []float64{0.0}, // We do not store the +Inf boundary. }, lset: labels.FromStrings("__name__", "something"), - // TODO(krajorama): ct: int64p(1520430001000), + ct: int64p(1520430001000), }, { m: `something{a="b"}`, shs: &histogram.Histogram{ @@ -383,7 +383,7 @@ foobar{quantile="0.99"} 150.1` CustomValues: []float64{0.0}, // We do not store the +Inf boundary. 
}, lset: labels.FromStrings("__name__", "something", "a", "b"), - // TODO(krajorama): ct: int64p(1520430002000), + ct: int64p(1520430002000), }, { m: "yum", help: "Summary with _created between sum and quantiles", @@ -394,22 +394,22 @@ foobar{quantile="0.99"} 150.1` m: `yum_count`, v: 20, lset: labels.FromStrings("__name__", "yum_count"), - // TODO(krajorama): ct: int64p(1520430003000), + ct: int64p(1520430003000), }, { m: `yum_sum`, v: 324789.5, lset: labels.FromStrings("__name__", "yum_sum"), - // TODO(krajorama): ct: int64p(1520430003000), + ct: int64p(1520430003000), }, { m: `yum{quantile="0.95"}`, v: 123.7, lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"), - // TODO(krajorama): ct: int64p(1520430003000), + ct: int64p(1520430003000), }, { m: `yum{quantile="0.99"}`, v: 150.0, lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"), - // TODO(krajorama): ct: int64p(1520430003000), + ct: int64p(1520430003000), }, { m: "foobar", help: "Summary with _created as the first line", @@ -420,22 +420,22 @@ foobar{quantile="0.99"} 150.1` m: `foobar_count`, v: 21, lset: labels.FromStrings("__name__", "foobar_count"), - // TODO(krajorama): ct: int64p(1520430004000), + ct: int64p(1520430004000), }, { m: `foobar_sum`, v: 324789.6, lset: labels.FromStrings("__name__", "foobar_sum"), - // TODO(krajorama): ct: int64p(1520430004000), + ct: int64p(1520430004000), }, { m: `foobar{quantile="0.95"}`, v: 123.8, lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"), - // TODO(krajorama): ct: int64p(1520430004000), + ct: int64p(1520430004000), }, { m: `foobar{quantile="0.99"}`, v: 150.1, lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"), - // TODO(krajorama): ct: int64p(1520430004000), + ct: int64p(1520430004000), }, { m: "metric", help: "foo\x00bar", @@ -555,42 +555,49 @@ func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { }, lset: labels.FromStrings("__name__", "test_histogram"), t: int64p(1234568), + ct: 
int64p(1000), }, { m: "test_histogram_count", v: 175, lset: labels.FromStrings("__name__", "test_histogram_count"), t: int64p(1234568), + ct: int64p(1000), }, { m: "test_histogram_sum", v: 0.0008280461746287094, lset: labels.FromStrings("__name__", "test_histogram_sum"), t: int64p(1234568), + ct: int64p(1000), }, { m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", v: 2, lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: int64p(1234568), + ct: int64p(1000), }, { m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", v: 4, lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: int64p(1234568), + ct: int64p(1000), }, { m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", v: 16, lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: int64p(1234568), + ct: int64p(1000), }, { m: "test_histogram_bucket\xffle\xff+Inf", v: 175, lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: int64p(1234568), + ct: int64p(1000), }, { // TODO(krajorama): optimize: this should not be here. In case there's @@ -609,6 +616,7 @@ func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { }, lset: labels.FromStrings("__name__", "test_histogram"), t: int64p(1234568), + ct: int64p(1000), }, } got := testParse(t, p) @@ -621,6 +629,10 @@ help: "Test histogram with classic and exponential buckets." type: HISTOGRAM metric: < histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > sample_count: 175 sample_sum: 0.0008280461746287094 bucket: < diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 70c24d9ec6..3ae9c7ddfc 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -102,6 +102,8 @@ type OpenMetricsParser struct { // Created timestamp parsing state. 
ct int64 ctHashSet uint64 + // ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead). + ignoreExemplar bool // visitedMFName is the metric family name of the last visited metric when peeking ahead // for _created series during the execution of the CreatedTimestamp method. visitedMFName []byte @@ -296,6 +298,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { p.skipCTSeries = false + p.ignoreExemplar = true + savedStart := p.start + defer func() { + p.ignoreExemplar = false + p.start = savedStart + p.l = resetLexer + }() + for { eType, err := p.Next() if err != nil { @@ -303,12 +313,12 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { // This might result in partial scrape with wrong/missing CT, but only // spec improvement would help. // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. - p.resetCTParseValues(resetLexer) + p.resetCTParseValues() return nil } if eType != EntrySeries { // Assume we hit different family, no CT line found. - p.resetCTParseValues(resetLexer) + p.resetCTParseValues() return nil } @@ -322,14 +332,14 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8]) if peekedHash != currHash { // Found CT line for a different series, for our series no CT. - p.resetCTParseValues(resetLexer) + p.resetCTParseValues() return nil } // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps ct := int64(p.val * 1000.0) - p.setCTParseValues(ct, currHash, currName, true, resetLexer) + p.setCTParseValues(ct, currHash, currName, true) return &ct } } @@ -371,17 +381,15 @@ func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []by // setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. 
// This is useful to prevent re-parsing the same series again and early return the CT value. -func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool, resetLexer *openMetricsLexer) { +func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) { p.ct = ct - p.l = resetLexer p.ctHashSet = ctHashSet p.visitedMFName = mfName p.skipCTSeries = skipCTSeries // Do we need to set it? } // resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. -func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) { - p.l = resetLexer +func (p *OpenMetricsParser) resetCTParseValues() { p.ctHashSet = 0 p.skipCTSeries = true } @@ -417,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.start = p.l.i p.offsets = p.offsets[:0] - p.eOffsets = p.eOffsets[:0] - p.exemplar = p.exemplar[:0] - p.exemplarVal = 0 - p.hasExemplarTs = false + if !p.ignoreExemplar { + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + } switch t := p.nextToken(); t { case tEOFWord: @@ -545,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) { func (p *OpenMetricsParser) parseComment() error { var err error + + if p.ignoreExemplar { + for t := p.nextToken(); t != tLinebreak; t = p.nextToken() { + if t == tEOF { + return errors.New("data does not end with # EOF") + } + } + return nil + } + // Parse the labels. 
p.eOffsets, err = p.parseLVals(p.eOffsets, true) if err != nil { From 7ca90e5729d7602a95afa4537b72229a5cbaf674 Mon Sep 17 00:00:00 2001 From: Jonathan Ballet Date: Thu, 24 Oct 2024 08:53:36 +0200 Subject: [PATCH 0811/1397] doc: fix formatting Signed-off-by: Jonathan Ballet --- docs/querying/api.md | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 6b7ae0524b..0352496f18 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -568,7 +568,7 @@ Instant vectors are returned as result type `vector`. The corresponding Each series could have the `"value"` key, or the `"histogram"` key, but not both. Series are not guaranteed to be returned in any particular order unless a function -such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)` +such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label) is used. ### Scalars @@ -905,7 +905,7 @@ curl -G http://localhost:9091/api/v1/targets/metadata \ ``` The following example returns metadata for all metrics for all targets with -label `instance="127.0.0.1:9090`. +label `instance="127.0.0.1:9090"`. ```json curl -G http://localhost:9091/api/v1/targets/metadata \ @@ -1190,9 +1190,11 @@ The following endpoint returns various cardinality statistics about the Promethe GET /api/v1/status/tsdb ``` URL query parameters: + - `limit=`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned. -The `data` section of the query result consists of +The `data` section of the query result consists of: + - **headStats**: This provides the following data about the head block of the TSDB: - **numSeries**: The number of series. - **chunkCount**: The number of chunks. @@ -1268,13 +1270,13 @@ The following endpoint returns information about the WAL replay: GET /api/v1/status/walreplay ``` -**read**: The number of segments replayed so far. 
-**total**: The total number segments needed to be replayed. -**progress**: The progress of the replay (0 - 100%). -**state**: The state of the replay. Possible states: -- **waiting**: Waiting for the replay to start. -- **in progress**: The replay is in progress. -- **done**: The replay has finished. +- **read**: The number of segments replayed so far. +- **total**: The total number segments needed to be replayed. +- **progress**: The progress of the replay (0 - 100%). +- **state**: The state of the replay. Possible states: + - **waiting**: Waiting for the replay to start. + - **in progress**: The replay is in progress. + - **done**: The replay has finished. ```json $ curl http://localhost:9090/api/v1/status/walreplay From 469573b13b728a0d5a96b7dc55a205d06c712abf Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 24 Oct 2024 18:14:05 +0200 Subject: [PATCH 0812/1397] fix(nhcb): do not return nhcb from parse if exponential is present (#15209) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From: https://github.com/prometheus/prometheus/pull/14978#discussion_r1800755481 Also encode the requirement table set in #13532 Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse.go | 37 +++- model/textparse/nhcbparse_test.go | 353 ++++++++++++++++++++++-------- 2 files changed, 286 insertions(+), 104 deletions(-) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index eab9fa7e6e..79f5c892ac 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -98,9 +98,11 @@ type NHCBParser struct { // Remembers the last base histogram metric name (assuming it's // a classic histogram) so we can tell if the next float series // is part of the same classic histogram. - lastHistogramName string - lastHistogramLabelsHash uint64 - hBuffer []byte + lastHistogramName string + lastHistogramLabelsHash uint64 + lastHistogramExponential bool + // Reused buffer for hashing labels. 
+ hBuffer []byte } func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser { @@ -199,10 +201,21 @@ func (p *NHCBParser) Next() (Entry, error) { p.bytes, p.ts, p.value = p.parser.Series() p.metricString = p.parser.Metric(&p.lset) // Check the label set to see if we can continue or need to emit the NHCB. - if p.compareLabels() && p.processNHCB() { - return EntryHistogram, nil + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. + if p.processNHCB() { + return EntryHistogram, nil + } + isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. Check if after an exponential histogram. + if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } } - isNHCB := p.handleClassicHistogramSeries(p.lset) if isNHCB && !p.keepClassicHistograms { // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. return p.Next() @@ -211,6 +224,7 @@ func (p *NHCBParser) Next() (Entry, error) { case EntryHistogram: p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() case EntryType: p.bName, p.typ = p.parser.Type() } @@ -239,9 +253,16 @@ func (p *NHCBParser) compareLabels() bool { } // Save the label set of the classic histogram without suffix and bucket `le` label. 
-func (p *NHCBParser) storeBaseLabels() { +func (p *NHCBParser) storeClassicLabels() { p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + p.lastHistogramExponential = false +} + +func (p *NHCBParser) storeExponentialLabels() { + p.lastHistogramName = p.lset.Get(labels.MetricName) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer) + p.lastHistogramExponential = true } // handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB @@ -282,7 +303,7 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { if p.state != stateCollecting { - p.storeBaseLabels() + p.storeClassicLabels() p.tempCT = p.parser.CreatedTimestamp() p.state = stateCollecting } diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 1ead2e30e2..b97de0f7e6 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -16,6 +16,7 @@ package textparse import ( "bytes" "encoding/binary" + "strconv" "testing" "github.com/gogo/protobuf/proto" @@ -493,7 +494,6 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 {Labels: labels.FromStrings("id", "something-test"), Value: 0.5}, {Labels: labels.FromStrings("id", "something-test"), Value: 8.0}, }, - // TODO(krajorama): ct: int64p(1520430001000), }, { m: `something{a="b"}`, shs: &histogram.Histogram{ @@ -509,7 +509,6 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 {Labels: labels.FromStrings("id", "something-test"), Value: 0.0, HasTs: true, Ts: 123321}, {Labels: labels.FromStrings("id", "something-test"), Value: 2e100, HasTs: true, Ts: 123000}, }, - // TODO(krajorama): ct: int64p(1520430002000), }, } @@ -520,112 
+519,208 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 requireEntries(t, exp, got) } -// Verify that the NHCBParser does not parse the NHCB when the exponential is present. +// Verify the requirement tables from +// https://github.com/prometheus/prometheus/issues/13532 . +// "classic" means the option "always_scrape_classic_histograms". +// "nhcb" means the option "convert_classic_histograms_to_nhcb". +// +// Currently only with the ProtoBuf parser that supports exponential +// histograms. +// +// Case 1. Only classic histogram is exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | YES | NO | NO |. +// | classic=true, nhcb=false | YES | NO | NO |. +// | classic=false, nhcb=true | NO | NO | YES |. +// | classic=true, nhcb=true | YES | NO | YES |. +// +// Case 2. Both classic and exponential histograms are exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | NO | YES | NO |. +// | classic=true, nhcb=false | YES | YES | NO |. +// | classic=false, nhcb=true | NO | YES | NO |. +// | classic=true, nhcb=true | YES | YES | NO |. +// +// Case 3. Only exponential histogram is exposed. +// +// | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. +// | classic=false, nhcb=false | NO | YES | NO |. +// | classic=true, nhcb=false | NO | YES | NO |. +// | classic=false, nhcb=true | NO | YES | NO |. +// | classic=true, nhcb=true | NO | YES | NO |. func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { - inputBuf := createTestProtoBufHistogram(t) - // Initialize the protobuf parser so that it returns classic histograms as - // well when there's both classic and exponential histograms. 
- p := NewProtobufParser(inputBuf.Bytes(), true, labels.NewSymbolTable()) + type requirement struct { + expectClassic bool + expectExponential bool + expectNHCB bool + } - // Initialize the NHCBParser so that it returns classic histograms as well - // when there's both classic and exponential histograms. - p = NewNHCBParser(p, labels.NewSymbolTable(), true) - - exp := []parsedEntry{ + cases := []map[string]requirement{ + // Case 1. { - m: "test_histogram", - help: "Test histogram with classic and exponential buckets.", + "classic=false, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false}, + "classic=true, nhcb=false": {expectClassic: true, expectExponential: false, expectNHCB: false}, + "classic=false, nhcb=true": {expectClassic: false, expectExponential: false, expectNHCB: true}, + "classic=true, nhcb=true": {expectClassic: true, expectExponential: false, expectNHCB: true}, }, + // Case 2. { - m: "test_histogram", - typ: model.MetricTypeHistogram, + "classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=false": {expectClassic: true, expectExponential: true, expectNHCB: false}, + "classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=true": {expectClassic: true, expectExponential: true, expectNHCB: false}, }, + // Case 3. 
{ - m: "test_histogram", - shs: &histogram.Histogram{ - Schema: 3, - Count: 175, - Sum: 0.0008280461746287094, - ZeroThreshold: 2.938735877055719e-39, - ZeroCount: 2, - PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, - NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, - PositiveBuckets: []int64{1, 2, -1, -1}, - NegativeBuckets: []int64{1, 3, -2, -1, 1}, - }, - lset: labels.FromStrings("__name__", "test_histogram"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: "test_histogram_count", - v: 175, - lset: labels.FromStrings("__name__", "test_histogram_count"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: "test_histogram_sum", - v: 0.0008280461746287094, - lset: labels.FromStrings("__name__", "test_histogram_sum"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", - v: 2, - lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", - v: 4, - lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", - v: 16, - lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: "test_histogram_bucket\xffle\xff+Inf", - v: 175, - lset: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - // TODO(krajorama): optimize: this should not be here. In case there's - // an exponential histogram we should not convert the classic histogram - // to NHCB. 
In the end TSDB will throw this away with - // storage.errDuplicateSampleForTimestamp error at Commit(), but it - // is better to avoid this conversion in the first place. - m: "test_histogram{}", - shs: &histogram.Histogram{ - Schema: histogram.CustomBucketsSchema, - Count: 175, - Sum: 0.0008280461746287094, - PositiveSpans: []histogram.Span{{Length: 4}}, - PositiveBuckets: []int64{2, 0, 10, 147}, - CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, - }, - lset: labels.FromStrings("__name__", "test_histogram"), - t: int64p(1234568), - ct: int64p(1000), + "classic=false, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=false": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=false, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false}, + "classic=true, nhcb=true": {expectClassic: false, expectExponential: true, expectNHCB: false}, }, } - got := testParse(t, p) - requireEntries(t, exp, got) + + type testCase struct { + name string + classic bool + nhcb bool + exp []parsedEntry + } + + testCases := []testCase{} + for _, classic := range []bool{false, true} { + for _, nhcb := range []bool{false, true} { + tc := testCase{ + name: "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb), + classic: classic, + nhcb: nhcb, + exp: []parsedEntry{}, + } + for i, caseI := range cases { + req := caseI[tc.name] + metric := "test_histogram" + strconv.Itoa(i+1) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + help: "Test histogram " + strconv.Itoa(i+1), + }) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + typ: model.MetricTypeHistogram, + }) + if req.expectExponential { + // Always expect exponential histogram first. 
+ exponentialSeries := []parsedEntry{ + { + m: metric, + shs: &histogram.Histogram{ + Schema: 3, + Count: 175, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + ZeroCount: 2, + PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, + NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: int64p(1000), + }, + } + tc.exp = append(tc.exp, exponentialSeries...) + } + if req.expectClassic { + // Always expect classic histogram series after exponential. + classicSeries := []parsedEntry{ + { + m: metric + "_count", + v: 175, + lset: labels.FromStrings("__name__", metric+"_count"), + t: int64p(1234568), + ct: int64p(1000), + }, + { + m: metric + "_sum", + v: 0.0008280461746287094, + lset: labels.FromStrings("__name__", metric+"_sum"), + t: int64p(1234568), + ct: int64p(1000), + }, + { + m: metric + "_bucket\xffle\xff-0.0004899999999999998", + v: 2, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"), + t: int64p(1234568), + ct: int64p(1000), + }, + { + m: metric + "_bucket\xffle\xff-0.0003899999999999998", + v: 4, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"), + t: int64p(1234568), + ct: int64p(1000), + }, + { + m: metric + "_bucket\xffle\xff-0.0002899999999999998", + v: 16, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"), + t: int64p(1234568), + ct: int64p(1000), + }, + { + m: metric + "_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"), + t: int64p(1234568), + ct: int64p(1000), + }, + } + tc.exp = append(tc.exp, classicSeries...) + } + if req.expectNHCB { + // Always expect NHCB series after classic. 
+ nhcbSeries := []parsedEntry{ + { + m: metric + "{}", + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 175, + Sum: 0.0008280461746287094, + PositiveSpans: []histogram.Span{{Length: 4}}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: int64p(1000), + }, + } + tc.exp = append(tc.exp, nhcbSeries...) + } + } + testCases = append(testCases, tc) + } + } + + inputBuf := createTestProtoBufHistogram(t) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := NewProtobufParser(inputBuf.Bytes(), tc.classic, labels.NewSymbolTable()) + if tc.nhcb { + p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic) + } + got := testParse(t, p) + requireEntries(t, tc.exp, got) + }) + } } func createTestProtoBufHistogram(t *testing.T) *bytes.Buffer { - testMetricFamilies := []string{`name: "test_histogram" -help: "Test histogram with classic and exponential buckets." 
+ testMetricFamilies := []string{`name: "test_histogram1" +help: "Test histogram 1" type: HISTOGRAM metric: < histogram: < @@ -647,6 +742,72 @@ metric: < cumulative_count: 16 upper_bound: -0.0002899999999999998 > + > + timestamp_ms: 1234568 +>`, `name: "test_histogram2" +help: "Test histogram 2" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +>`, `name: "test_histogram3" +help: "Test histogram 3" +type: HISTOGRAM +metric: < + histogram: < + created_timestamp: < + seconds: 1 + nanos: 1 + > + sample_count: 175 + sample_sum: 0.0008280461746287094 schema: 3 zero_threshold: 2.938735877055719e-39 zero_count: 2 From 3cb09acb218189de660703a2823a24dc53f3a978 Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Thu, 24 Oct 2024 18:18:21 +0200 Subject: [PATCH 0813/1397] Docs: Remove experimental note on out of order feature (#15215) Signed-off-by: Jesus Vazquez --- docs/configuration/configuration.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 31ceac734f..104f7754f6 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2940,8 +2940,6 @@ with this feature. 
`tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB. -NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it. - ```yaml # Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time. # An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp From 99882eec3ba32d45178fa4651be5f68048bae8e4 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 24 Oct 2024 09:27:15 -0700 Subject: [PATCH 0814/1397] log last series labelset when hitting OOO series labels during compaction Signed-off-by: Ben Ye --- tsdb/index/index.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 3cd00729ab..8c0f698eae 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -438,7 +438,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... return err } if labels.Compare(lset, w.lastSeries) <= 0 { - return fmt.Errorf("out-of-order series added with label set %q", lset) + return fmt.Errorf("out-of-order series added with label set %q, last label set %q", lset, w.lastSeries) } if ref < w.lastSeriesRef && !w.lastSeries.IsEmpty() { From 20fdc8f541274aa117dafe974c2118c07f05d8a6 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 24 Oct 2024 14:07:54 +0100 Subject: [PATCH 0815/1397] [CHANGE] Remote-write: default enable_http2 to false Remote-write creates several shards to parallelise sending, each with its own http connection. We do not want them all combined onto one socket by http2. 
Signed-off-by: Bryan Boreham --- CHANGELOG.md | 1 + config/config.go | 7 ++++++- docs/configuration/configuration.md | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72d9f7a110..084b88d6f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## unreleased * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136 +* [CHANGE] Remote-write: default enable_http2 to false. * [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 - [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 - [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 diff --git a/config/config.go b/config/config.go index 657c4fc759..30a74e0402 100644 --- a/config/config.go +++ b/config/config.go @@ -181,13 +181,18 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } + DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: false, + } + // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig, } // DefaultQueueConfig is the default remote queue configuration. 
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 104f7754f6..2093ed8836 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2889,6 +2889,7 @@ metadata_config: # HTTP client settings, including authentication methods (such as basic auth and # authorization), proxy configurations, TLS options, custom HTTP headers, etc. +# enable_http2 defaults to false for remote-write. [ ] ``` From 7939eab77ae11cc064c114c7b5e2df7190ff4777 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Thu, 24 Oct 2024 22:32:08 +0200 Subject: [PATCH 0816/1397] remote-write: change test default expected to http2 disabled Signed-off-by: Jan Fajerski --- config/config_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 8bf664c1f0..c3148f93a7 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -142,7 +142,7 @@ var expectedConf = &Config{ }, }, FollowRedirects: true, - EnableHTTP2: true, + EnableHTTP2: false, }, }, { @@ -158,7 +158,7 @@ var expectedConf = &Config{ KeyFile: filepath.FromSlash("testdata/valid_key_file"), }, FollowRedirects: true, - EnableHTTP2: true, + EnableHTTP2: false, }, Headers: map[string]string{"name": "value"}, }, From b602393473ac8ddceb9c3de308643414f8d2b531 Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Thu, 24 Oct 2024 01:01:25 -0400 Subject: [PATCH 0817/1397] fix: avoid data race in log deduper This change should have been included in the initial prometheus slog conversion, but I must've lost track of it in all the rebases involved in that PR. This changes the dedupe logger so that the only method that needs to use the lock is the `Handle()` method that actually interacts with the deduplication map. 
Ex: ``` ================== WARNING: DATA RACE Write at 0x00c000518bc0 by goroutine 29481: github.com/prometheus/prometheus/util/logging.(*Deduper).WithAttrs() /home/tjhop/go/src/github.com/prometheus/prometheus/util/logging/dedupe.go:89 +0xef log/slog.(*Logger).With() /home/tjhop/.asdf/installs/golang/1.23.1/go/src/log/slog/logger.go:132 +0x106 github.com/prometheus/prometheus/storage/remote.NewQueueManager() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/queue_manager.go:483 +0x7a9 github.com/prometheus/prometheus/storage/remote.(*WriteStorage).ApplyConfig() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/write.go:201 +0x102c github.com/prometheus/prometheus/storage/remote.(*Storage).ApplyConfig() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage.go:92 +0xfd github.com/prometheus/prometheus/storage/remote.TestWriteStorageApplyConfigsDuringCommit.func1() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage_test.go:172 +0x3e4 github.com/prometheus/prometheus/storage/remote.TestWriteStorageApplyConfigsDuringCommit.gowrap1() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage_test.go:174 +0x41 Previous read at 0x00c000518bc0 by goroutine 31261: github.com/prometheus/prometheus/util/logging.(*Deduper).Handle() /home/tjhop/go/src/github.com/prometheus/prometheus/util/logging/dedupe.go:82 +0x2b1 log/slog.(*Logger).log() /home/tjhop/.asdf/installs/golang/1.23.1/go/src/log/slog/logger.go:257 +0x228 log/slog.(*Logger).Error() /home/tjhop/.asdf/installs/golang/1.23.1/go/src/log/slog/logger.go:230 +0x3d4 github.com/prometheus/prometheus/tsdb/wlog.(*Watcher).loop() /home/tjhop/go/src/github.com/prometheus/prometheus/tsdb/wlog/watcher.go:254 +0x2db github.com/prometheus/prometheus/tsdb/wlog.(*Watcher).Start.gowrap1() /home/tjhop/go/src/github.com/prometheus/prometheus/tsdb/wlog/watcher.go:227 +0x33 Goroutine 29481 (running) created at: 
github.com/prometheus/prometheus/storage/remote.TestWriteStorageApplyConfigsDuringCommit() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage_test.go:164 +0xe4 testing.tRunner() /home/tjhop/.asdf/installs/golang/1.23.1/go/src/testing/testing.go:1690 +0x226 testing.(*T).Run.gowrap1() /home/tjhop/.asdf/installs/golang/1.23.1/go/src/testing/testing.go:1743 +0x44 Goroutine 31261 (running) created at: github.com/prometheus/prometheus/tsdb/wlog.(*Watcher).Start() /home/tjhop/go/src/github.com/prometheus/prometheus/tsdb/wlog/watcher.go:227 +0x177 github.com/prometheus/prometheus/storage/remote.(*QueueManager).Start() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/queue_manager.go:934 +0x304 github.com/prometheus/prometheus/storage/remote.(*WriteStorage).ApplyConfig() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/write.go:232 +0x151b github.com/prometheus/prometheus/storage/remote.(*Storage).ApplyConfig() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage.go:92 +0xfd github.com/prometheus/prometheus/storage/remote.TestWriteStorageApplyConfigsDuringCommit.func1() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage_test.go:172 +0x3e4 github.com/prometheus/prometheus/storage/remote.TestWriteStorageApplyConfigsDuringCommit.gowrap1() /home/tjhop/go/src/github.com/prometheus/prometheus/storage/remote/storage_test.go:174 +0x41 ================== --- FAIL: TestWriteStorageApplyConfigsDuringCommit (2.26s) testing.go:1399: race detected during execution of test FAIL FAIL github.com/prometheus/prometheus/storage/remote 68.321s ``` Signed-off-by: TJ Hoplock --- util/logging/dedupe.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go index 37b345b395..b08b80a853 100644 --- a/util/logging/dedupe.go +++ b/util/logging/dedupe.go @@ -51,11 +51,7 @@ func Dedupe(next *slog.Logger, 
repeat time.Duration) *Deduper { // provided context and log level, and returns false otherwise. It implements // slog.Handler. func (d *Deduper) Enabled(ctx context.Context, level slog.Level) bool { - d.mtx.RLock() - enabled := d.next.Enabled(ctx, level) - d.mtx.RUnlock() - - return enabled + return d.next.Enabled(ctx, level) } // Handle uses the provided context and slog.Record to deduplicate messages @@ -85,19 +81,27 @@ func (d *Deduper) Handle(ctx context.Context, r slog.Record) error { // WithAttrs adds the provided attributes to the Deduper's internal // slog.Logger. It implements slog.Handler. func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { - d.mtx.Lock() - d.next = slog.New(d.next.Handler().WithAttrs(attrs)) - d.mtx.Unlock() - return d + return &Deduper{ + next: slog.New(d.next.Handler().WithAttrs(attrs)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } } // WithGroup adds the provided group name to the Deduper's internal // slog.Logger. It implements slog.Handler. func (d *Deduper) WithGroup(name string) slog.Handler { - d.mtx.Lock() - d.next = slog.New(d.next.Handler().WithGroup(name)) - d.mtx.Unlock() - return d + if name == "" { + return d + } + + return &Deduper{ + next: slog.New(d.next.Handler().WithGroup(name)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } } // Info logs the provided message and key-value arguments using the Deduper's From 4f9e4dc0165e3a7d818f2933c80aaa9c2097b3c5 Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Thu, 24 Oct 2024 01:31:21 -0400 Subject: [PATCH 0818/1397] ref: remove unused deduper log wrapper methods I used these wrapper methods during initial development of the custom handler that the deduper now implements. Since the deduper implements slog.Handler and can be used directly as a logger, these wrapper methods are no longer needed. 
Signed-off-by: TJ Hoplock --- util/logging/dedupe.go | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go index b08b80a853..d5aee5c095 100644 --- a/util/logging/dedupe.go +++ b/util/logging/dedupe.go @@ -104,30 +104,6 @@ func (d *Deduper) WithGroup(name string) slog.Handler { } } -// Info logs the provided message and key-value arguments using the Deduper's -// internal slog.Logger. It is simply a wrapper around slog.Logger.Info(). -func (d *Deduper) Info(msg string, args ...any) { - d.next.Info(msg, args...) -} - -// Warn logs the provided message and key-value arguments using the Deduper's -// internal slog.Logger. It is simply a wrapper around slog.Logger.Warn(). -func (d *Deduper) Warn(msg string, args ...any) { - d.next.Warn(msg, args...) -} - -// Error logs the provided message and key-value arguments using the Deduper's -// internal slog.Logger. It is simply a wrapper around slog.Logger.Error(). -func (d *Deduper) Error(msg string, args ...any) { - d.next.Error(msg, args...) -} - -// Debug logs the provided message and key-value arguments using the Deduper's -// internal slog.Logger. It is simply a wrapper around slog.Logger.Debug(). -func (d *Deduper) Debug(msg string, args ...any) { - d.next.Debug(msg, args...) -} - // Stop the Deduper. 
func (d *Deduper) Stop() { close(d.quit) From f131cdd4c5471deeda4db376d2f2b804e386dd96 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Fri, 25 Oct 2024 12:30:13 +0200 Subject: [PATCH 0819/1397] 3.0 migration guide (#15099) * docs: 2 to 3 migration guide Signed-off-by: Jan Fajerski * docs/stability: add 3.0 section Signed-off-by: Jan Fajerski * docs/migration: details on enabling legacy name validation Signed-off-by: Owen Williams \ * migration: add log format and `le` normalization Signed-off-by: Jan Fajerski * migration: add new enable_http2 default for remote write Signed-off-by: Jan Fajerski --------- Signed-off-by: Jan Fajerski Signed-off-by: Owen Williams Co-authored-by: Owen Williams --- docs/migration.md | 348 +++++++++++++++++++++++----------------------- docs/stability.md | 16 ++- 2 files changed, 184 insertions(+), 180 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index cb88bbfd6f..43fc43df2a 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -3,198 +3,198 @@ title: Migration sort_rank: 10 --- -# Prometheus 2.0 migration guide +# Prometheus 3.0 migration guide -In line with our [stability promise](https://prometheus.io/blog/2016/07/18/prometheus-1-0-released/#fine-print), -the Prometheus 2.0 release contains a number of backwards incompatible changes. -This document offers guidance on migrating from Prometheus 1.8 to Prometheus 2.0 and newer versions. +In line with our [stability promise](https://prometheus.io/docs/prometheus/latest/stability/), +the Prometheus 3.0 release contains a number of backwards incompatible changes. +This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 and newer versions. ## Flags -The format of Prometheus command line flags has changed. Instead of a -single dash, all flags now use a double dash. Common flags (`--config.file`, -`--web.listen-address` and `--web.external-url`) remain but -almost all storage-related flags have been removed. 
+- The following feature flags have been removed and they have been added to the + default behavior of Prometheus v3: + - `promql-at-modifier` + - `promql-negative-offset` + - `remote-write-receiver` + - `new-service-discovery-manager` + - `expand-external-labels` + Environment variable references `${var}` or `$var` in external label values + are replaced according to the values of the current environment variables. + References to undefined variables are replaced by the empty string. + The `$` character can be escaped by using `$$`. + - `no-default-scrape-port` + Prometheus v3 will no longer add ports to scrape targets according to the + specified scheme. Target will now appear in labels as configured. + If you rely on scrape targets like + `https://example.com/metrics` or `http://exmaple.com/metrics` to be + represented as `https://example.com/metrics:443` and + `http://example.com/metrics:80` respectively, add them to your target URLs + - `agent` + Instead use the dedicated `--agent` cli flag. -Some notable flags which have been removed: + Prometheus v3 will log a warning if you continue to pass these to + `--enable-feature`. -- `-alertmanager.url` In Prometheus 2.0, the command line flags for configuring - a static Alertmanager URL have been removed. Alertmanager must now be - discovered via service discovery, see [Alertmanager service discovery](#alertmanager-service-discovery). +## Configuration -- `-log.format` In Prometheus 2.0 logs can only be streamed to standard error. - -- `-query.staleness-delta` has been renamed to `--query.lookback-delta`; Prometheus - 2.0 introduces a new mechanism for handling staleness, see [staleness](querying/basics.md#staleness). - -- `-storage.local.*` Prometheus 2.0 introduces a new storage engine; as such all - flags relating to the old engine have been removed. For information on the - new engine, see [Storage](#storage). 
- -- `-storage.remote.*` Prometheus 2.0 has removed the deprecated remote - storage flags, and will fail to start if they are supplied. To write to - InfluxDB, Graphite, or OpenTSDB use the relevant storage adapter. - -## Alertmanager service discovery - -Alertmanager service discovery was introduced in Prometheus 1.4, allowing Prometheus -to dynamically discover Alertmanager replicas using the same mechanism as scrape -targets. In Prometheus 2.0, the command line flags for static Alertmanager config -have been removed, so the following command line flag: - -``` -./prometheus -alertmanager.url=http://alertmanager:9093/ -``` - -Would be replaced with the following in the `prometheus.yml` config file: - -```yaml -alerting: - alertmanagers: - - static_configs: - - targets: - - alertmanager:9093 -``` - -You can also use all the usual Prometheus service discovery integrations and -relabeling in your Alertmanager configuration. This snippet instructs -Prometheus to search for Kubernetes pods, in the `default` namespace, with the -label `name: alertmanager` and with a non-empty port. - -```yaml -alerting: - alertmanagers: - - kubernetes_sd_configs: - - role: pod - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - source_labels: [__meta_kubernetes_pod_label_name] - regex: alertmanager - action: keep - - source_labels: [__meta_kubernetes_namespace] - regex: default - action: keep - - source_labels: [__meta_kubernetes_pod_container_port_number] - regex: - action: drop -``` - -## Recording rules and alerts - -The format for configuring alerting and recording rules has been changed to YAML. 
-An example of a recording rule and alert in the old format: - -``` -job:request_duration_seconds:histogram_quantile99 = - histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m]))) - -ALERT FrontendRequestLatency - IF job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1 - FOR 5m - ANNOTATIONS { - summary = "High frontend request latency", - } -``` - -Would look like this: - -```yaml -groups: -- name: example.rules - rules: - - record: job:request_duration_seconds:histogram_quantile99 - expr: histogram_quantile(0.99, sum by (le, job) (rate(request_duration_seconds_bucket[1m]))) - - alert: FrontendRequestLatency - expr: job:request_duration_seconds:histogram_quantile99{job="frontend"} > 0.1 - for: 5m - annotations: - summary: High frontend request latency -``` - -To help with the change, the `promtool` tool has a mode to automate the rules conversion. Given a `.rules` file, it will output a `.rules.yml` file in the -new format. For example: - -``` -$ promtool update rules example.rules -``` - -You will need to use `promtool` from [Prometheus 2.5](https://github.com/prometheus/prometheus/releases/tag/v2.5.0) as later versions no longer contain the above subcommand. - -## Storage - -The data format in Prometheus 2.0 has completely changed and is not backwards -compatible with 1.8 and older versions. To retain access to your historic monitoring data we -recommend you run a non-scraping Prometheus instance running at least version -1.8.1 in parallel with your Prometheus 2.0 instance, and have the new server -read existing data from the old one via the remote read protocol. 
- -Your Prometheus 1.8 instance should be started with the following flags and an -config file containing only the `external_labels` setting (if any): - -``` -$ ./prometheus-1.8.1.linux-amd64/prometheus -web.listen-address ":9094" -config.file old.yml -``` - -Prometheus 2.0 can then be started (on the same machine) with the following flags: - -``` -$ ./prometheus-2.0.0.linux-amd64/prometheus --config.file prometheus.yml -``` - -Where `prometheus.yml` contains in addition to your full existing configuration, the stanza: - -```yaml -remote_read: - - url: "http://localhost:9094/api/v1/read" -``` +- The scrape job level configuration option `scrape_classic_histograms` has been + renamed to `always_scrape_classic_histograms`. If you use the + `--enable-feature=native-histograms` feature flag to ingest native histograms + and you also want to ingest classic histograms that an endpoint might expose + along with native histograms, be sure to add this configuration or change your + configuration from the old name. +- The `http_config.enable_http2` in `remote_write` items default has been + changed to `false`. In Prometheus v2 the remote write http client would + default to use http2. In order to parallelize multiple remote write queues + across multiple sockets its preferable to not default to http2. + If you prefer to use http2 for remote write you must now set + `http_config.enable_http2: true` in your `remote_write` configuration section. ## PromQL -The following features have been removed from PromQL: +- The `.` pattern in regular expressions in PromQL matches newline characters. + With this change a regular expressions like `.*` matches strings that include + `\n`. This applies to matchers in queries and relabel configs. For example the + following regular expressions now match the accompanying strings, wheras in + Prometheus v2 these combinations didn't match. -- `drop_common_labels` function - the `without` aggregation modifier should be used - instead. 
-- `keep_common` aggregation modifier - the `by` modifier should be used instead. -- `count_scalar` function - use cases are better handled by `absent()` or correct - propagation of labels in operations. +| Regex | Additional matches | +| ----- | ------ | +| ".*" | "foo\n", "Foo\nBar" | +| "foo.?bar" | "foo\nbar" | +| "foo.+bar" | "foo\nbar" | -See [issue #3060](https://github.com/prometheus/prometheus/issues/3060) for more -details. + If you want Prometheus v3 to behave like v2 did, you will have to change your + regular expressions by replacing all `.` patterns with `[^\n]`, e.g. + `foo[^\n]*`. +- Lookback and range selectors are left open and right closed (previously left + closed and right closed). This change affects queries when the evaluation time + perfectly aligns with the sample timestamps. For example assume querying a + timeseries with even spaced samples exactly 1 minute apart. Before Prometheus + 3.x, range query with `5m` will mostly return 5 samples. But if the query + evaluation aligns perfectly with a scrape, it would return 6 samples. In + Prometheus 3.x queries like this will always return 5 samples. + This change has likely few effects for everyday use, except for some sub query + use cases. + Query front-ends that align queries usually align sub-queries to multiples of + the step size. These sub queries will likely be affected. + Tests are more likely to affected. To fix those either adjust the expected + number of samples or extend to range by less then one sample interval. +- The `holt_winters` function has been renamed to `double_exponential_smoothing` + and is now guarded by the `promql-experimental-functions` feature flag. + If you want to keep using holt_winters, you have to do both of these things: + - Rename holt_winters to double_exponential_smoothing in your queries. + - Pass `--enable-feature=promql-experimental-functions` in your Prometheus + cli invocation.. 
+ +## Scrape protocols +Prometheus v3 is more strict concerning the Content-Type header received when +scraping. Prometheus v2 would default to the standard Prometheus text protocol +if the target being scraped did not specify a Content-Type header or if the +header was unparsable or unrecognised. This could lead to incorrect data being +parsed in the scrape. Prometheus v3 will now fail the scrape in such cases. + +If a scrape target is not providing the correct Content-Type header the +fallback protocol can be specified using the fallback_scrape_protocol +parameter. See [Prometheus scrape_config documentation.](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) + +This is a breaking change as scrapes that may have succeeded with Prometheus v2 +may now fail if this fallback protocol is not specified. ## Miscellaneous -### Prometheus non-root user +### TSDB format and downgrade +The TSDB format has been changed in Prometheus v2.55 in preparation for changes +to the index format. Consequently a Prometheus v3 tsdb can only be read by a +Prometheus v2.55 or newer. +Before upgrading to Prometheus v3 please upgrade to v2.55 first and confirm +Prometheus works as expected. Only then continue with the upgrade to v3. -The Prometheus Docker image is now built to [run Prometheus -as a non-root user](https://github.com/prometheus/prometheus/pull/2859). If you -want the Prometheus UI/API to listen on a low port number (say, port 80), you'll -need to override it. For Kubernetes, you would use the following YAML: +### TSDB Storage contract +TSDB compatible storage is now expected to return results matching the specified +selectors. This might impact some third party implementations, most likely +implementing `remote_read`. +This contract is not explicitly enforced, but can cause undefined behavior. + +### UTF-8 names +Prometheus v3 supports UTF-8 in metric and label names. 
This means metric and +label names can change after upgrading according to what is exposed by +endpoints. Furthermore, metric and label names that would have previously been +flagged as invalid no longer will be. + +Users wishing to preserve the original validation behavior can update their +prometheus yaml configuration to specify the legacy validation scheme: + +``` +global: + metric_name_validation_scheme: legacy +``` + +Or on a per-scrape basis: + +``` +scrape_configs: + - job_name: job1 + metric_name_validation_scheme: utf8 + - job_name: job2 + metric_name_validation_scheme: legacy +``` + +### Log message format +Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This +results in a change of log message format. An example of the old log format is: +``` +ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d +ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)" +ts=2024-10-23T22:01:06.074Z caller=main.go:676 level=info build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" +ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" +``` + +a similar sequence in the new log format looks like this: +``` +time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d +time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)" 
+time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:686 msg="operational information" build_context="(go=go1.23.0, platform=linux/amd64, user=, date=, tags=unknown)" host_details="(Linux 5.15.0-124-generic #134-Ubuntu SMP Fri Sep 27 20:20:17 UTC 2024 x86_64 gigafips (none))" fd_limits="(soft=1048576, hard=1048576)" vm_limits="(soft=unlimited, hard=unlimited)" +``` + +### `le` and `quantile` label values +In Prometheus v3, the values of the `le` label of classic histograms and the +`quantile` label of summaries are normalized upon ingestions. In Prometheus v2 +the value of these labels depended on the scrape protocol (protobuf vs text +format) in some situations. This led to label values changing based on the +scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be +ingested as `my_classic_hist{le="1"}` via the text format, but as +`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the +metric and caused problems when querying the metric. +In Prometheus v3 these label values will always be normalized to a float like +representation. I.e. the above example will always result in +`my_classic_hist{le="1.0"}` being ingested into prometheus, no matter via which +protocol. The effect of this change is that alerts, recording rules and +dashboards that directly reference label values as whole numbers such as +`le="1"` will stop working. + +Ways to deal with this change either globally or on a per metric basis: + +- Fix references to integer `le`, `quantile` label values, but otherwise do +nothing and accept that some queries that span the transition time will produce +inaccurate or unexpected results. +_This is the recommended solution._ +- Use `metric_relabel_config` to retain the old labels when scraping targets. +This should **only** be applied to metrics that currently produce such labels. 
```yaml -apiVersion: v1 -kind: Pod -metadata: - name: security-context-demo-2 -spec: - securityContext: - runAsUser: 0 -... + metric_relabel_configs: + - source_labels: + - quantile + target_label: quantile + regex: (\d+)\.0+ + - source_labels: + - le + - __name__ + target_label: le + regex: (\d+)\.0+;.*_bucket ``` -See [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) -for more details. +# Prometheus 2.0 migration guide -If you're using Docker, then the following snippet would be used: - -``` -docker run -p 9090:9090 prom/prometheus:latest -``` - -### Prometheus lifecycle - -If you use the Prometheus `/-/reload` HTTP endpoint to [automatically reload your -Prometheus config when it changes](configuration/configuration.md), -these endpoints are disabled by default for security reasons in Prometheus 2.0. -To enable them, set the `--web.enable-lifecycle` flag. +For the Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/). diff --git a/docs/stability.md b/docs/stability.md index 1fd2e51e0c..cb30b8ad99 100644 --- a/docs/stability.md +++ b/docs/stability.md @@ -9,7 +9,7 @@ Prometheus promises API stability within a major version, and strives to avoid breaking changes for key features. Some features, which are cosmetic, still under development, or depend on 3rd party services, are not covered by this. -Things considered stable for 2.x: +Things considered stable for 3.x: * The query language and data model * Alerting and recording rules @@ -18,21 +18,25 @@ Things considered stable for 2.x: * Configuration file format (minus the service discovery remote read/write, see below) * Rule/alert file format * Console template syntax and semantics -* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/). 
+* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/) and receiving +* Agent mode +* OTLP receiver endpoint -Things considered unstable for 2.x: +Things considered unstable for 3.x: * Any feature listed as experimental or subject to change, including: - * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458) - * Remote write receiving, remote read and the remote read endpoint + * The [`double_exponential_smoothing` PromQL function](https://github.com/prometheus/prometheus/issues/2458) + * Remote read and the remote read endpoint * Server-side HTTPS and basic authentication -* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs` +* Service discovery integrations, with the exception of `static_configs`, `file_sd_configs` and `http_sd_config` * Go APIs of packages that are part of the server * HTML generated by the web UI * The metrics in the /metrics endpoint of Prometheus itself * Exact on-disk format. Potential changes however, will be forward compatible and transparently handled by Prometheus * The format of the logs +Prometheus 2.x stability guarantees can be found [in the 2.x documentation](https://prometheus.io/docs/prometheus/2.55/stability/). + As long as you are not using any features marked as experimental/unstable, an upgrade within a major version can usually be performed without any operational adjustments and very little risk that anything will break. Any breaking changes From b6e22cd346eda9ad57c4b26d0bed10adf873a73f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Mon, 7 Aug 2023 16:55:17 +0100 Subject: [PATCH 0820/1397] Short-cut common memChunk operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit memChunk is a linked list, speed up some common operations when there's no need to iterate all elements on the list. 
Signed-off-by: Łukasz Mierzwa --- tsdb/head.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tsdb/head.go b/tsdb/head.go index 2963d781d0..fefd31bd33 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -2265,6 +2265,10 @@ type memChunk struct { // len returns the length of memChunk list, including the element it was called on. func (mc *memChunk) len() (count int) { + if mc.prev == nil { + return 1 + } + elem := mc for elem != nil { count++ @@ -2276,6 +2280,9 @@ func (mc *memChunk) len() (count int) { // oldest returns the oldest element on the list. // For single element list this will be the same memChunk oldest() was called on. func (mc *memChunk) oldest() (elem *memChunk) { + if mc.prev == nil { + return mc + } elem = mc for elem.prev != nil { elem = elem.prev @@ -2288,6 +2295,9 @@ func (mc *memChunk) atOffset(offset int) (elem *memChunk) { if offset == 0 { return mc } + if offset == 1 { + return mc.prev + } if offset < 0 { return nil } @@ -2301,7 +2311,6 @@ func (mc *memChunk) atOffset(offset int) (elem *memChunk) { break } } - return elem } From d87f7440ca5da009ea885cb23d390ce412ddc681 Mon Sep 17 00:00:00 2001 From: Charlie Le Date: Thu, 24 Oct 2024 11:51:41 -0700 Subject: [PATCH 0821/1397] support int exemplar value type When the exemplar type is an int, it incorrectly gets converted to a 0 when DoubleValue() is called on the exemplar. This adds a check to ensure that the value is converted properly based on the type. 
Signed-off-by: Charlie Le --- .../prometheusremotewrite/helper.go | 10 +++++- .../prometheusremotewrite/helper_test.go | 35 +++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index fd7f58f073..f7fede258b 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -351,9 +351,17 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, exemplarRunes := 0 promExemplar := prompb.Exemplar{ - Value: exemplar.DoubleValue(), Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), } + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar.Value = float64(exemplar.IntValue()) + case pmetric.ExemplarValueTypeDouble: + promExemplar.Value = exemplar.DoubleValue() + default: + return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index a48a57b062..9a994c5a42 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -406,3 +406,38 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { }) } } + +func TestGetPromExemplars(t *testing.T) { + ctx := context.Background() + everyN := &everyNTimes{n: 1} + + t.Run("Exemplars with int value", func(t *testing.T) { + pt := pmetric.NewNumberDataPoint() + exemplar := pt.Exemplars().AppendEmpty() + exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) + 
exemplar.SetIntValue(42) + exemplars, err := getPromExemplars(ctx, everyN, pt) + assert.NoError(t, err) + assert.Len(t, exemplars, 1) + assert.Equal(t, float64(42), exemplars[0].Value) + }) + + t.Run("Exemplars with double value", func(t *testing.T) { + pt := pmetric.NewNumberDataPoint() + exemplar := pt.Exemplars().AppendEmpty() + exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) + exemplar.SetDoubleValue(69.420) + exemplars, err := getPromExemplars(ctx, everyN, pt) + assert.NoError(t, err) + assert.Len(t, exemplars, 1) + assert.Equal(t, 69.420, exemplars[0].Value) + }) + + t.Run("Exemplars with unsupported value type", func(t *testing.T) { + pt := pmetric.NewNumberDataPoint() + exemplar := pt.Exemplars().AppendEmpty() + exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) + _, err := getPromExemplars(ctx, everyN, pt) + assert.Error(t, err) + }) +} From 372b83d7b8601247355531a91487cd98ef2970fe Mon Sep 17 00:00:00 2001 From: gopi Date: Sat, 26 Oct 2024 01:10:15 +0530 Subject: [PATCH 0822/1397] Documented that WAL can still be written after memory-snapshot-on-shutdown (#15179) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Documented that WAL can still be written after memory-snapshot-on-shutdown - #10824 Co-authored-by: Björn Rabenstein Signed-off-by: gopi --------- Signed-off-by: Gopi-eng2202 Signed-off-by: gopi Co-authored-by: Björn Rabenstein --- docs/feature_flags.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index c2de68dec2..4be11ed472 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -23,9 +23,8 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem `--enable-feature=memory-snapshot-on-shutdown` -This takes the snapshot of the chunks that are in memory along with the series information when shutting down and stores -it on disk. 
This will reduce the startup time since the memory state can be restored with this snapshot and m-mapped -chunks without the need of WAL replay. +This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot +and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot. ## Extra scrape metrics From 3acb3144fe9c735169833c20bb384f1cb28fa23b Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Sat, 26 Oct 2024 09:03:10 +0200 Subject: [PATCH 0823/1397] update CHANGELOG Signed-off-by: Jan Fajerski --- CHANGELOG.md | 44 +++++++++----------------------------------- 1 file changed, 9 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9ed603318..ab454a9fcb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,8 +3,14 @@ ## unreleased * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136 -* [CHANGE] Remote-write: default enable_http2 to false. -* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 +* [CHANGE] Remote-write: default enable_http2 to false. #15219 +* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164 +* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178 +* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657 +* [CHANGE] Adopt log/slog and remove go-kit/log. #14906 +* [CHANGE] Disallow configuring AM with the v1 api. #13883 +* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. 
#14710 +* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 - [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 - [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 @@ -54,6 +60,7 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 +* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 ## 2.55.0 / 2024-10-22 @@ -88,39 +95,6 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716 * [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821 * [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 -* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 - -## 2.55.0-rc.0 / 2024-09-20 - -* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727 -* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 -* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817 -* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815 -* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734 -* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. 
#14200 -* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346 -* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403 -* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506 -* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706 -* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612 -* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379 -* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450 -* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477 -* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655 -* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621 -* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413 -* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816 -* [ENHANCEMENT] API: Support multiple listening addresses. #14665 -* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934 -* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948 -* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729 -* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147 -* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622 -* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. 
#14810 -* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766 -* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716 -* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821 -* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042 ## 2.54.1 / 2024-08-27 From bab587b9dca16274e38babbbf56efba50956dbd2 Mon Sep 17 00:00:00 2001 From: Pedro Tanaka Date: Sun, 27 Oct 2024 02:06:34 +0200 Subject: [PATCH 0824/1397] Agent: allow for ingestion of CT samples (#15124) * Remove unused option from HeadOptions Signed-off-by: Pedro Tanaka * Improve docs for appendable() method in head appender Signed-off-by: Pedro Tanaka * Ingest CT (float) samples in Agent DB Signed-off-by: Pedro Tanaka * allow for ingestion of CT native histogram Signed-off-by: Pedro Tanaka * adding some verification for ct ts Signed-off-by: Pedro Tanaka * Validating CT histogram before append and add newly created series to pending series Signed-off-by: Pedro Tanaka * checking the wal for written samples Signed-off-by: Pedro Tanaka * Checking for samples in test Signed-off-by: Pedro Tanaka * adding case for validations Signed-off-by: Pedro Tanaka * fixing comparison when dedupelabels is enabled Signed-off-by: Pedro Tanaka * unite tests, use table testing Signed-off-by: Pedro Tanaka * Implement CT related methods in timestampTracker for write storage Signed-off-by: Pedro Tanaka * adding error case to test Signed-off-by: Pedro Tanaka * removing unused fields Signed-off-by: Pedro Tanaka * Updating lastTs for series when adding CT to invalidate duplicates Signed-off-by: Pedro Tanaka * making sure that updating the lastTS wont cause OOO later on in Commit(); Signed-off-by: Pedro Tanaka --------- Signed-off-by: Pedro Tanaka --- storage/remote/write.go | 24 ++-- tsdb/agent/db.go | 136 ++++++++++++++++++++-- 
tsdb/agent/db_test.go | 246 ++++++++++++++++++++++++++++++++++++++++ tsdb/head.go | 4 - tsdb/head_append.go | 7 +- 5 files changed, 395 insertions(+), 22 deletions(-) diff --git a/storage/remote/write.go b/storage/remote/write.go index 00e4fa3a0c..639f344520 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -312,8 +312,23 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, return 0, nil } -func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { - // TODO: Implement +func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64) (storage.SeriesRef, error) { + t.samples++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } + return 0, nil +} + +func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + t.histograms++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } return 0, nil } @@ -323,11 +338,6 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, return 0, nil } -func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { - // AppendCTZeroSample is no-op for remote-write for now. 
- return 0, nil -} - // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 5de84c93af..3863e6cd99 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -976,19 +976,139 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int return storage.SeriesRef(series.ref), nil } -func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - // TODO(bwplotka/arthursens): Wire metadata in the Agent's appender. - return 0, nil -} - func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { // TODO: Wire metadata in the Agent's appender. return 0, nil } -func (a *appender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { - // TODO(bwplotka): Wire metadata in the Agent's appender. - return 0, nil +func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h != nil { + if err := h.Validate(); err != nil { + return 0, err + } + } + if fh != nil { + if err := fh.Validate(); err != nil { + return 0, err + } + } + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + series := a.series.GetByID(chunks.HeadSeriesRef(ref)) + if series == nil { + // Ensure no empty labels have gotten through. 
+ l = l.WithoutEmpty() + if l.IsEmpty() { + return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) + } + + var created bool + series, created = a.getOrCreate(l) + if created { + a.pendingSeries = append(a.pendingSeries, record.RefSeries{ + Ref: series.ref, + Labels: l, + }) + a.metrics.numActiveSeries.Inc() + } + } + + series.Lock() + defer series.Unlock() + + if ct <= a.minValidTime(series.lastTs) { + return 0, storage.ErrOutOfOrderCT + } + + if ct > series.lastTs { + series.lastTs = ct + } else { + // discard the sample if it's out of order. + return 0, storage.ErrOutOfOrderCT + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{ + Ref: series.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, series) + case fh != nil: + a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{ + Ref: series.ref, + T: ct, + FH: &histogram.FloatHistogram{}, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, series) + } + + a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() + return storage.SeriesRef(series.ref), nil +} + +func (a *appender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + series := a.series.GetByID(chunks.HeadSeriesRef(ref)) + if series == nil { + l = l.WithoutEmpty() + if l.IsEmpty() { + return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample) + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample) + } + + newSeries, created := a.getOrCreate(l) + if created { + a.pendingSeries = 
append(a.pendingSeries, record.RefSeries{ + Ref: newSeries.ref, + Labels: l, + }) + a.metrics.numActiveSeries.Inc() + } + + series = newSeries + } + + series.Lock() + defer series.Unlock() + + if t <= a.minValidTime(series.lastTs) { + a.metrics.totalOutOfOrderSamples.Inc() + return 0, storage.ErrOutOfOrderSample + } + + if ct > series.lastTs { + series.lastTs = ct + } else { + // discard the sample if it's out of order. + return 0, storage.ErrOutOfOrderCT + } + + // NOTE: always modify pendingSamples and sampleSeries together. + a.pendingSamples = append(a.pendingSamples, record.RefSample{ + Ref: series.ref, + T: ct, + V: 0, + }) + a.sampleSeries = append(a.sampleSeries, series) + + a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc() + + return storage.SeriesRef(series.ref), nil } // Commit submits the collected samples and purges the batch. diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 4d5fda25db..b28c29095c 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -15,7 +15,9 @@ package agent import ( "context" + "errors" "fmt" + "io" "math" "path/filepath" "strconv" @@ -29,6 +31,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" @@ -933,6 +936,249 @@ func TestDBOutOfOrderTimeWindow(t *testing.T) { } } +type walSample struct { + t int64 + f float64 + h *histogram.Histogram + lbls labels.Labels + ref storage.SeriesRef +} + +func TestDBCreatedTimestampSamplesIngestion(t *testing.T) { + t.Parallel() + + type appendableSample struct { + t int64 + ct int64 + v float64 + lbls labels.Labels + h *histogram.Histogram + expectsError bool + } + + testHistogram := tsdbutil.GenerateTestHistograms(1)[0] + zeroHistogram := &histogram.Histogram{} + + lbls := labelsForTest(t.Name(), 1) + 
defLbls := labels.New(lbls[0]...) + + testCases := []struct { + name string + inputSamples []appendableSample + expectedSamples []*walSample + expectedSeriesCount int + }{ + { + name: "in order ct+normal sample/floatSamples", + inputSamples: []appendableSample{ + {t: 100, ct: 1, v: 10, lbls: defLbls}, + {t: 101, ct: 1, v: 10, lbls: defLbls}, + }, + expectedSamples: []*walSample{ + {t: 1, f: 0, lbls: defLbls}, + {t: 100, f: 10, lbls: defLbls}, + {t: 101, f: 10, lbls: defLbls}, + }, + }, + { + name: "CT+float && CT+histogram samples", + inputSamples: []appendableSample{ + { + t: 100, + ct: 30, + v: 20, + lbls: defLbls, + }, + { + t: 300, + ct: 230, + h: testHistogram, + lbls: defLbls, + }, + }, + expectedSamples: []*walSample{ + {t: 30, f: 0, lbls: defLbls}, + {t: 100, f: 20, lbls: defLbls}, + {t: 230, h: zeroHistogram, lbls: defLbls}, + {t: 300, h: testHistogram, lbls: defLbls}, + }, + expectedSeriesCount: 1, + }, + { + name: "CT+float && CT+histogram samples with error", + inputSamples: []appendableSample{ + { + // invalid CT + t: 100, + ct: 100, + v: 10, + lbls: defLbls, + expectsError: true, + }, + { + // invalid CT histogram + t: 300, + ct: 300, + h: testHistogram, + lbls: defLbls, + expectsError: true, + }, + }, + expectedSamples: []*walSample{ + {t: 100, f: 10, lbls: defLbls}, + {t: 300, h: testHistogram, lbls: defLbls}, + }, + expectedSeriesCount: 0, + }, + { + name: "In order ct+normal sample/histogram", + inputSamples: []appendableSample{ + {t: 100, h: testHistogram, ct: 1, lbls: defLbls}, + {t: 101, h: testHistogram, ct: 1, lbls: defLbls}, + }, + expectedSamples: []*walSample{ + {t: 1, h: &histogram.Histogram{}}, + {t: 100, h: testHistogram}, + {t: 101, h: &histogram.Histogram{CounterResetHint: histogram.NotCounterReset}}, + }, + }, + { + name: "ct+normal then OOO sample/float", + inputSamples: []appendableSample{ + {t: 60_000, ct: 40_000, v: 10, lbls: defLbls}, + {t: 120_000, ct: 40_000, v: 10, lbls: defLbls}, + {t: 180_000, ct: 40_000, v: 10, lbls: 
defLbls}, + {t: 50_000, ct: 40_000, v: 10, lbls: defLbls}, + }, + expectedSamples: []*walSample{ + {t: 40_000, f: 0, lbls: defLbls}, + {t: 50_000, f: 10, lbls: defLbls}, + {t: 60_000, f: 10, lbls: defLbls}, + {t: 120_000, f: 10, lbls: defLbls}, + {t: 180_000, f: 10, lbls: defLbls}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + reg := prometheus.NewRegistry() + opts := DefaultOptions() + opts.OutOfOrderTimeWindow = 360_000 + s := createTestAgentDB(t, reg, opts) + app := s.Appender(context.TODO()) + + for _, sample := range tc.inputSamples { + // We supposed to write a Histogram to the WAL + if sample.h != nil { + _, err := app.AppendHistogramCTZeroSample(0, sample.lbls, sample.t, sample.ct, zeroHistogram, nil) + if !errors.Is(err, storage.ErrOutOfOrderCT) { + require.Equal(t, sample.expectsError, err != nil, "expected error: %v, got: %v", sample.expectsError, err) + } + + _, err = app.AppendHistogram(0, sample.lbls, sample.t, sample.h, nil) + require.NoError(t, err) + } else { + // We supposed to write a float sample to the WAL + _, err := app.AppendCTZeroSample(0, sample.lbls, sample.t, sample.ct) + if !errors.Is(err, storage.ErrOutOfOrderCT) { + require.Equal(t, sample.expectsError, err != nil, "expected error: %v, got: %v", sample.expectsError, err) + } + + _, err = app.Append(0, sample.lbls, sample.t, sample.v) + require.NoError(t, err) + } + } + + require.NoError(t, app.Commit()) + // Close the DB to ensure all data is flushed to the WAL + require.NoError(t, s.Close()) + + // Check that we dont have any OOO samples in the WAL by checking metrics + families, err := reg.Gather() + require.NoError(t, err, "failed to gather metrics") + for _, f := range families { + if f.GetName() == "prometheus_agent_out_of_order_samples_total" { + t.Fatalf("unexpected metric %s", f.GetName()) + } + } + + outputSamples := readWALSamples(t, s.wal.Dir()) + + require.Equal(t, len(tc.expectedSamples), len(outputSamples), 
"Expected %d samples", len(tc.expectedSamples)) + + for i, expectedSample := range tc.expectedSamples { + for _, sample := range outputSamples { + if sample.t == expectedSample.t && sample.lbls.String() == expectedSample.lbls.String() { + if expectedSample.h != nil { + require.Equal(t, expectedSample.h, sample.h, "histogram value mismatch (sample index %d)", i) + } else { + require.Equal(t, expectedSample.f, sample.f, "value mismatch (sample index %d)", i) + } + } + } + } + }) + } +} + +func readWALSamples(t *testing.T, walDir string) []*walSample { + t.Helper() + sr, err := wlog.NewSegmentsReader(walDir) + require.NoError(t, err) + defer func(sr io.ReadCloser) { + err := sr.Close() + require.NoError(t, err) + }(sr) + + r := wlog.NewReader(sr) + dec := record.NewDecoder(labels.NewSymbolTable()) + + var ( + samples []record.RefSample + histograms []record.RefHistogramSample + + lastSeries record.RefSeries + outputSamples = make([]*walSample, 0) + ) + + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series, err := dec.Series(rec, nil) + require.NoError(t, err) + lastSeries = series[0] + case record.Samples: + samples, err = dec.Samples(rec, samples[:0]) + require.NoError(t, err) + for _, s := range samples { + outputSamples = append(outputSamples, &walSample{ + t: s.T, + f: s.V, + lbls: lastSeries.Labels.Copy(), + ref: storage.SeriesRef(lastSeries.Ref), + }) + } + case record.HistogramSamples: + histograms, err = dec.HistogramSamples(rec, histograms[:0]) + require.NoError(t, err) + for _, h := range histograms { + outputSamples = append(outputSamples, &walSample{ + t: h.T, + h: h.H, + lbls: lastSeries.Labels.Copy(), + ref: storage.SeriesRef(lastSeries.Ref), + }) + } + } + } + + return outputSamples +} + func BenchmarkCreateSeries(b *testing.B) { s := createTestAgentDB(b, nil, DefaultOptions()) defer s.Close() diff --git a/tsdb/head.go b/tsdb/head.go index 2963d781d0..c67c438e52 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ 
-155,10 +155,6 @@ type HeadOptions struct { // OutOfOrderTimeWindow is > 0 EnableOOONativeHistograms atomic.Bool - // EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample. - // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md - EnableCreatedTimestampZeroIngestion bool - ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 170e740448..9c732990bf 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -474,9 +474,10 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo return s, created, nil } -// appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) -// The sample belongs to the out of order chunk if we return true and no error. -// An error signifies the sample cannot be handled. +// appendable checks whether the given sample is valid for appending to the series. +// If the sample is valid and in-order, it returns false with no error. +// If the sample belongs to the out-of-order chunk, it returns true with no error. +// If the sample cannot be handled, it returns an error. func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { From eb3b349024de77bb57499fddf3a7f55b449f95cf Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 28 Oct 2024 08:31:43 +0100 Subject: [PATCH 0825/1397] fix(nhcb): created timestamp fails when keeping classic histograms (#15218) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The wrong source was used to return the created timestamp, leading to index out of bound panic. One line fix. 
Refactor the requirement test to be generic and be able to test OpenMetrics and Prom parsers as well. There are some differencies in what the parsers support, the Prom parser doesn't have created timestamp. The protobuf parser uses different formatting to identify the metric for the scrape loop. Each parser represents the sample timestamp differently. Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse.go | 2 +- model/textparse/nhcbparse_test.go | 303 +++++++++++++++++++----------- 2 files changed, 191 insertions(+), 114 deletions(-) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index 79f5c892ac..d019c327c3 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -169,7 +169,7 @@ func (p *NHCBParser) CreatedTimestamp() *int64 { return p.parser.CreatedTimestamp() } case stateCollecting: - return p.parser.CreatedTimestamp() + return p.tempCT case stateEmitting: return p.ctNHCB } diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index b97de0f7e6..6152a85038 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -524,9 +524,6 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 // "classic" means the option "always_scrape_classic_histograms". // "nhcb" means the option "convert_classic_histograms_to_nhcb". // -// Currently only with the ProtoBuf parser that supports exponential -// histograms. -// // Case 1. Only classic histogram is exposed. // // | Scrape Config | Expect classic | Expect exponential | Expect NHCB |. @@ -550,7 +547,7 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 // | classic=true, nhcb=false | NO | YES | NO |. // | classic=false, nhcb=true | NO | YES | NO |. // | classic=true, nhcb=true | NO | YES | NO |. 
-func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { +func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) { type requirement struct { expectClassic bool expectExponential bool @@ -581,134 +578,190 @@ func TestNHCBParserProtoBufParser_NoNHCBWhenExponential(t *testing.T) { }, } + // Create parser from keep classic option. + type parserFactory func(bool) Parser + type testCase struct { name string + parser parserFactory classic bool nhcb bool exp []parsedEntry } + type parserOptions struct { + useUTF8sep bool + hasCreatedTimeStamp bool + } + // Defines the parser name, the Parser factory and the test cases + // supported by the parser and parser options. + parsers := []func() (string, parserFactory, []int, parserOptions){ + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + inputBuf := createTestProtoBufHistogram(t) + return NewProtobufParser(inputBuf.Bytes(), keepClassic, labels.NewSymbolTable()) + } + return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true} + }, + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + input := createTestOpenMetricsHistogram() + return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + } + return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true} + }, + func() (string, parserFactory, []int, parserOptions) { + factory := func(keepClassic bool) Parser { + input := createTestPromHistogram() + return NewPromParser([]byte(input), labels.NewSymbolTable()) + } + return "Prometheus", factory, []int{1}, parserOptions{} + }, + } + testCases := []testCase{} - for _, classic := range []bool{false, true} { - for _, nhcb := range []bool{false, true} { - tc := testCase{ - name: "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb), - classic: classic, - nhcb: nhcb, - exp: []parsedEntry{}, - } 
- for i, caseI := range cases { - req := caseI[tc.name] - metric := "test_histogram" + strconv.Itoa(i+1) - tc.exp = append(tc.exp, parsedEntry{ - m: metric, - help: "Test histogram " + strconv.Itoa(i+1), - }) - tc.exp = append(tc.exp, parsedEntry{ - m: metric, - typ: model.MetricTypeHistogram, - }) - if req.expectExponential { - // Always expect exponential histogram first. - exponentialSeries := []parsedEntry{ - { - m: metric, - shs: &histogram.Histogram{ - Schema: 3, - Count: 175, - Sum: 0.0008280461746287094, - ZeroThreshold: 2.938735877055719e-39, - ZeroCount: 2, - PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, - NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, - PositiveBuckets: []int64{1, 2, -1, -1}, - NegativeBuckets: []int64{1, 3, -2, -1, 1}, + for _, parser := range parsers { + for _, classic := range []bool{false, true} { + for _, nhcb := range []bool{false, true} { + parserName, parser, supportedCases, options := parser() + requirementName := "classic=" + strconv.FormatBool(classic) + ", nhcb=" + strconv.FormatBool(nhcb) + tc := testCase{ + name: "parser=" + parserName + ", " + requirementName, + parser: parser, + classic: classic, + nhcb: nhcb, + exp: []parsedEntry{}, + } + for _, caseNumber := range supportedCases { + caseI := cases[caseNumber-1] + req, ok := caseI[requirementName] + require.True(t, ok, "Case %d does not have requirement %s", caseNumber, requirementName) + metric := "test_histogram" + strconv.Itoa(caseNumber) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + help: "Test histogram " + strconv.Itoa(caseNumber), + }) + tc.exp = append(tc.exp, parsedEntry{ + m: metric, + typ: model.MetricTypeHistogram, + }) + + var ct *int64 + if options.hasCreatedTimeStamp { + ct = int64p(1000) + } + + var bucketForMetric func(string) string + if options.useUTF8sep { + bucketForMetric = func(s string) string { + return "_bucket\xffle\xff" + s + } + } else { + bucketForMetric = 
func(s string) string { + return "_bucket{le=\"" + s + "\"}" + } + } + + if req.expectExponential { + // Always expect exponential histogram first. + exponentialSeries := []parsedEntry{ + { + m: metric, + shs: &histogram.Histogram{ + Schema: 3, + Count: 175, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + ZeroCount: 2, + PositiveSpans: []histogram.Span{{Offset: -161, Length: 1}, {Offset: 8, Length: 3}}, + NegativeSpans: []histogram.Span{{Offset: -162, Length: 1}, {Offset: 23, Length: 4}}, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: ct, }, - lset: labels.FromStrings("__name__", metric), - t: int64p(1234568), - ct: int64p(1000), - }, + } + tc.exp = append(tc.exp, exponentialSeries...) } - tc.exp = append(tc.exp, exponentialSeries...) - } - if req.expectClassic { - // Always expect classic histogram series after exponential. - classicSeries := []parsedEntry{ - { - m: metric + "_count", - v: 175, - lset: labels.FromStrings("__name__", metric+"_count"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: metric + "_sum", - v: 0.0008280461746287094, - lset: labels.FromStrings("__name__", metric+"_sum"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: metric + "_bucket\xffle\xff-0.0004899999999999998", - v: 2, - lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: metric + "_bucket\xffle\xff-0.0003899999999999998", - v: 4, - lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: metric + "_bucket\xffle\xff-0.0002899999999999998", - v: 16, - lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"), - t: int64p(1234568), - ct: int64p(1000), - }, - { - m: metric + "_bucket\xffle\xff+Inf", - v: 175, - lset: 
labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"), - t: int64p(1234568), - ct: int64p(1000), - }, - } - tc.exp = append(tc.exp, classicSeries...) - } - if req.expectNHCB { - // Always expect NHCB series after classic. - nhcbSeries := []parsedEntry{ - { - m: metric + "{}", - shs: &histogram.Histogram{ - Schema: histogram.CustomBucketsSchema, - Count: 175, - Sum: 0.0008280461746287094, - PositiveSpans: []histogram.Span{{Length: 4}}, - PositiveBuckets: []int64{2, 0, 10, 147}, - CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + if req.expectClassic { + // Always expect classic histogram series after exponential. + classicSeries := []parsedEntry{ + { + m: metric + "_count", + v: 175, + lset: labels.FromStrings("__name__", metric+"_count"), + t: int64p(1234568), + ct: ct, }, - lset: labels.FromStrings("__name__", metric), - t: int64p(1234568), - ct: int64p(1000), - }, + { + m: metric + "_sum", + v: 0.0008280461746287094, + lset: labels.FromStrings("__name__", metric+"_sum"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0004899999999999998"), + v: 2, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0004899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0003899999999999998"), + v: 4, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0003899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("-0.0002899999999999998"), + v: 16, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "-0.0002899999999999998"), + t: int64p(1234568), + ct: ct, + }, + { + m: metric + bucketForMetric("+Inf"), + v: 175, + lset: labels.FromStrings("__name__", metric+"_bucket", "le", "+Inf"), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, classicSeries...) + } + if req.expectNHCB { + // Always expect NHCB series after classic. 
+ nhcbSeries := []parsedEntry{ + { + m: metric + "{}", + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 175, + Sum: 0.0008280461746287094, + PositiveSpans: []histogram.Span{{Length: 4}}, + PositiveBuckets: []int64{2, 0, 10, 147}, + CustomValues: []float64{-0.0004899999999999998, -0.0003899999999999998, -0.0002899999999999998}, + }, + lset: labels.FromStrings("__name__", metric), + t: int64p(1234568), + ct: ct, + }, + } + tc.exp = append(tc.exp, nhcbSeries...) } - tc.exp = append(tc.exp, nhcbSeries...) } + testCases = append(testCases, tc) } - testCases = append(testCases, tc) } } - inputBuf := createTestProtoBufHistogram(t) - for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - p := NewProtobufParser(inputBuf.Bytes(), tc.classic, labels.NewSymbolTable()) + p := tc.parser(tc.classic) if tc.nhcb { p = NewNHCBParser(p, labels.NewSymbolTable(), tc.classic) } @@ -860,3 +913,27 @@ metric: < return buf } + +func createTestOpenMetricsHistogram() string { + return `# HELP test_histogram1 Test histogram 1 +# TYPE test_histogram1 histogram +test_histogram1_count 175 1234.568 +test_histogram1_sum 0.0008280461746287094 1234.568 +test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234.568 +test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234.568 +test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234.568 +test_histogram1_bucket{le="+Inf"} 175 1234.568 +test_histogram1_created 1 +# EOF` +} + +func createTestPromHistogram() string { + return `# HELP test_histogram1 Test histogram 1 +# TYPE test_histogram1 histogram +test_histogram1_count 175 1234568 +test_histogram1_sum 0.0008280461746287094 1234768 +test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568 +test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568 +test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568 +test_histogram1_bucket{le="+Inf"} 175 1234568` +} From eafe72a0d0d2525715027b52705d89a9b68b37a8 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 25 Oct 2024 09:42:46 +0200 Subject: [PATCH 0826/1397] perf(nhcb): optimize away most allocations in convertnhcb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In general aim for the happy case when the exposer lists the buckets in ascending order. Use Compact(2) to compact the result of nhcb convert. This is more in line with how client_golang optimizes spans vs buckets. https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/histogram.go#L1485 Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse.go | 60 +++--- model/textparse/nhcbparse_test.go | 49 ++++- promql/promqltest/test.go | 28 ++- util/convertnhcb/convertnhcb.go | 286 +++++++++++++++++---------- util/convertnhcb/convertnhcb_test.go | 189 ++++++++++++++++++ 5 files changed, 461 insertions(+), 151 deletions(-) create mode 100644 util/convertnhcb/convertnhcb_test.go diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index d019c327c3..6fe2e8e54e 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -283,18 +283,18 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) if err == nil && !math.IsNaN(le) { p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) { - hist.BucketCounts[le] = p.value + _ = hist.SetBucketCount(le, p.value) }) return true } case strings.HasSuffix(mName, "_count"): p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) { - hist.Count = p.value + _ = hist.SetCount(p.value) }) return true case strings.HasSuffix(mName, "_sum"): p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) { - hist.Sum = p.value + _ = hist.SetSum(p.value) }) return true } @@ -306,8 +306,8 @@ func (p *NHCBParser) processClassicHistogramSeries(lset 
labels.Labels, suffix st p.storeClassicLabels() p.tempCT = p.parser.CreatedTimestamp() p.state = stateCollecting + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) } - p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) p.storeExemplars() updateHist(&p.tempNHCB) } @@ -335,7 +335,6 @@ func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar { func (p *NHCBParser) swapExemplars() { p.exemplars = p.tempExemplars[:p.tempExemplarCount] p.tempExemplars = p.tempExemplars[:0] - p.tempExemplarCount = 0 } // processNHCB converts the collated classic histogram series to NHCB and caches the info @@ -344,33 +343,32 @@ func (p *NHCBParser) processNHCB() bool { if p.state != stateCollecting { return false } - ub := make([]float64, 0, len(p.tempNHCB.BucketCounts)) - for b := range p.tempNHCB.BucketCounts { - ub = append(ub, b) - } - upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false) - fhBase := hBase.ToFloat(nil) - h, fh := convertnhcb.NewHistogram(p.tempNHCB, upperBounds, hBase, fhBase) - if h != nil { - if err := h.Validate(); err != nil { - return false + h, fh, err := p.tempNHCB.Convert() + if err == nil { + if h != nil { + if err := h.Validate(); err != nil { + return false + } + p.hNHCB = h + p.fhNHCB = nil + } else if fh != nil { + if err := fh.Validate(); err != nil { + return false + } + p.hNHCB = nil + p.fhNHCB = fh } - p.hNHCB = h - p.fhNHCB = nil - } else if fh != nil { - if err := fh.Validate(); err != nil { - return false - } - p.hNHCB = nil - p.fhNHCB = fh + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") + p.bytesNHCB = []byte(p.metricStringNHCB) + p.lsetNHCB = p.tempLsetNHCB + p.swapExemplars() + p.ctNHCB = p.tempCT + p.state = stateEmitting + } else { + p.state = stateStart } - p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") - 
p.bytesNHCB = []byte(p.metricStringNHCB) - p.lsetNHCB = p.tempLsetNHCB - p.swapExemplars() - p.ctNHCB = p.tempCT - p.tempNHCB = convertnhcb.NewTempHistogram() - p.state = stateEmitting + p.tempNHCB.Reset() + p.tempExemplarCount = 0 p.tempCT = nil - return true + return err == nil } diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 6152a85038..859bcc1cb7 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -500,8 +500,8 @@ something_bucket{a="b",le="+Inf"} 9 # {id="something-test"} 2e100 123.000 Schema: histogram.CustomBucketsSchema, Count: 9, Sum: 42123.0, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}, {Offset: 1, Length: 1}}, - PositiveBuckets: []int64{8, -7}, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 3}}, + PositiveBuckets: []int64{8, -8, 1}, CustomValues: []float64{0.0, 1.0}, // We do not store the +Inf boundary. }, lset: labels.FromStrings("__name__", "something", "a", "b"), @@ -937,3 +937,48 @@ test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568 test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568 test_histogram1_bucket{le="+Inf"} 175 1234568` } + +func TestNHCBParserErrorHandling(t *testing.T) { + input := `# HELP something Histogram with non cumulative buckets +# TYPE something histogram +something_count 18 +something_sum 324789.4 +something_created 1520430001 +something_bucket{le="0.0"} 18 +something_bucket{le="+Inf"} 1 +something_count{a="b"} 9 +something_sum{a="b"} 42123 +something_created{a="b"} 1520430002 +something_bucket{a="b",le="0.0"} 1 +something_bucket{a="b",le="+Inf"} 9 +# EOF` + exp := []parsedEntry{ + { + m: "something", + help: "Histogram with non cumulative buckets", + }, + { + m: "something", + typ: model.MetricTypeHistogram, + }, + // The parser should skip the series with non-cumulative buckets. 
+ { + m: `something{a="b"}`, + shs: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 9, + Sum: 42123.0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []int64{1, 7}, + CustomValues: []float64{0.0}, // We do not store the +Inf boundary. + }, + lset: labels.FromStrings("__name__", "something", "a", "b"), + ct: int64p(1520430002000), + }, + } + + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + p = NewNHCBParser(p, labels.NewSymbolTable(), false) + got := testParse(t, p) + requireEntries(t, exp, got) +} diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index e078bcb60b..fe140a0f1d 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -482,18 +482,16 @@ func (cmd *loadCmd) append(a storage.Appender) error { type tempHistogramWrapper struct { metric labels.Labels - upperBounds []float64 histogramByTs map[int64]convertnhcb.TempHistogram } func newTempHistogramWrapper() tempHistogramWrapper { return tempHistogramWrapper{ - upperBounds: []float64{}, histogramByTs: map[int64]convertnhcb.TempHistogram{}, } } -func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*convertnhcb.TempHistogram, float64)) { +func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogram func(*convertnhcb.TempHistogram, float64)) { m2 := convertnhcb.GetHistogramMetricBase(m, suffix) m2hash := m2.Hash() histogramWrapper, exists := histogramMap[m2hash] @@ -501,9 +499,6 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap histogramWrapper = newTempHistogramWrapper() } histogramWrapper.metric = m2 - if updateHistogramWrapper != nil { - updateHistogramWrapper(&histogramWrapper) - } for _, s := 
range smpls { if s.H != nil { continue @@ -534,18 +529,16 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { if err != nil || math.IsNaN(le) { continue } - processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) { - histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le) - }, func(histogram *convertnhcb.TempHistogram, f float64) { - histogram.BucketCounts[le] = f + processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + _ = histogram.SetBucketCount(le, f) }) case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { - histogram.Count = f + processClassicHistogramSeries(m, "_count", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + _ = histogram.SetCount(f) }) case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { - histogram.Sum = f + processClassicHistogramSeries(m, "_sum", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + _ = histogram.SetSum(f) }) } } @@ -553,11 +546,12 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { // Convert the collated classic histogram data into native histograms // with custom bounds and append them to the storage. 
for _, histogramWrapper := range histogramMap { - upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds, true) - fhBase := hBase.ToFloat(nil) samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs)) for t, histogram := range histogramWrapper.histogramByTs { - h, fh := convertnhcb.NewHistogram(histogram, upperBounds, hBase, fhBase) + h, fh, err := histogram.Convert() + if err != nil { + return err + } if fh == nil { if err := h.Validate(); err != nil { return err diff --git a/util/convertnhcb/convertnhcb.go b/util/convertnhcb/convertnhcb.go index 5e08422aa0..ee5bcb72de 100644 --- a/util/convertnhcb/convertnhcb.go +++ b/util/convertnhcb/convertnhcb.go @@ -14,6 +14,7 @@ package convertnhcb import ( + "errors" "fmt" "math" "sort" @@ -23,129 +24,212 @@ import ( "github.com/prometheus/prometheus/model/labels" ) +var ( + errNegativeBucketCount = errors.New("bucket count must be non-negative") + errNegativeCount = errors.New("count must be non-negative") + errCountMismatch = errors.New("count mismatch") + errCountNotCumulative = errors.New("count is not cumulative") +) + +type tempHistogramBucket struct { + le float64 + count float64 +} + // TempHistogram is used to collect information about classic histogram // samples incrementally before creating a histogram.Histogram or // histogram.FloatHistogram based on the values collected. type TempHistogram struct { - BucketCounts map[float64]float64 - Count float64 - Sum float64 - HasFloat bool + buckets []tempHistogramBucket + count float64 + sum float64 + err error + hasCount bool } // NewTempHistogram creates a new TempHistogram to // collect information about classic histogram samples. 
func NewTempHistogram() TempHistogram { return TempHistogram{ - BucketCounts: map[float64]float64{}, + buckets: make([]tempHistogramBucket, 0, 10), } } -func (h TempHistogram) getIntBucketCounts() (map[float64]int64, error) { - bucketCounts := map[float64]int64{} - for le, count := range h.BucketCounts { - intCount := int64(math.Round(count)) - if float64(intCount) != count { - return nil, fmt.Errorf("bucket count %f for le %g is not an integer", count, le) +func (h TempHistogram) Err() error { + return h.err +} + +func (h *TempHistogram) Reset() { + h.buckets = h.buckets[:0] + h.count = 0 + h.sum = 0 + h.err = nil + h.hasCount = false +} + +func (h *TempHistogram) SetBucketCount(boundary, count float64) error { + if h.err != nil { + return h.err + } + if count < 0 { + h.err = fmt.Errorf("%w: le=%g, count=%g", errNegativeBucketCount, boundary, count) + return h.err + } + // Assume that the elements are added in order. + switch { + case len(h.buckets) == 0: + h.buckets = append(h.buckets, tempHistogramBucket{le: boundary, count: count}) + case h.buckets[len(h.buckets)-1].le < boundary: + // Happy case is "<". + if count < h.buckets[len(h.buckets)-1].count { + h.err = fmt.Errorf("%w: %g < %g", errCountNotCumulative, count, h.buckets[len(h.buckets)-1].count) + return h.err } - bucketCounts[le] = intCount - } - return bucketCounts, nil -} - -// ProcessUpperBoundsAndCreateBaseHistogram prepares an integer native -// histogram with custom buckets based on the provided upper bounds. -// Everything is set except the bucket counts. -// The sorted upper bounds are also returned. 
-func ProcessUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64, needsDedup bool) ([]float64, *histogram.Histogram) { - sort.Float64s(upperBounds0) - var upperBounds []float64 - if needsDedup { - upperBounds = make([]float64, 0, len(upperBounds0)) - prevLE := math.Inf(-1) - for _, le := range upperBounds0 { - if le != prevLE { - upperBounds = append(upperBounds, le) - prevLE = le - } + h.buckets = append(h.buckets, tempHistogramBucket{le: boundary, count: count}) + case h.buckets[len(h.buckets)-1].le == boundary: + // Ignore this, as it is a duplicate sample. + default: + // Find the correct position to insert. + i := sort.Search(len(h.buckets), func(i int) bool { + return h.buckets[i].le >= boundary + }) + if h.buckets[i].le == boundary { + // Ignore this, as it is a duplicate sample. + return nil } - } else { - upperBounds = upperBounds0 - } - var customBounds []float64 - if upperBounds[len(upperBounds)-1] == math.Inf(1) { - customBounds = upperBounds[:len(upperBounds)-1] - } else { - customBounds = upperBounds - } - return upperBounds, &histogram.Histogram{ - Count: 0, - Sum: 0, - Schema: histogram.CustomBucketsSchema, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: uint32(len(upperBounds))}, - }, - PositiveBuckets: make([]int64, len(upperBounds)), - CustomValues: customBounds, - } -} - -// NewHistogram fills the bucket counts in the provided histogram.Histogram -// or histogram.FloatHistogram based on the provided temporary histogram and -// upper bounds. 
-func NewHistogram(histogram TempHistogram, upperBounds []float64, hBase *histogram.Histogram, fhBase *histogram.FloatHistogram) (*histogram.Histogram, *histogram.FloatHistogram) { - intBucketCounts, err := histogram.getIntBucketCounts() - if err != nil { - return nil, newFloatHistogram(histogram, upperBounds, histogram.BucketCounts, fhBase) - } - return newIntegerHistogram(histogram, upperBounds, intBucketCounts, hBase), nil -} - -func newIntegerHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]int64, hBase *histogram.Histogram) *histogram.Histogram { - h := hBase.Copy() - absBucketCounts := make([]int64, len(h.PositiveBuckets)) - var prevCount, total int64 - for i, le := range upperBounds { - currCount, exists := bucketCounts[le] - if !exists { - currCount = 0 + if i > 0 && count < h.buckets[i-1].count { + h.err = fmt.Errorf("%w: %g < %g", errCountNotCumulative, count, h.buckets[i-1].count) + return h.err } - count := currCount - prevCount - absBucketCounts[i] = count - total += count - prevCount = currCount + if count > h.buckets[i].count { + h.err = fmt.Errorf("%w: %g > %g", errCountNotCumulative, count, h.buckets[i].count) + return h.err + } + // Insert at the correct position unless duplicate. 
+ h.buckets = append(h.buckets, tempHistogramBucket{}) + copy(h.buckets[i+1:], h.buckets[i:]) + h.buckets[i] = tempHistogramBucket{le: boundary, count: count} } - h.PositiveBuckets[0] = absBucketCounts[0] - for i := 1; i < len(h.PositiveBuckets); i++ { - h.PositiveBuckets[i] = absBucketCounts[i] - absBucketCounts[i-1] - } - h.Sum = histogram.Sum - if histogram.Count != 0 { - total = int64(histogram.Count) - } - h.Count = uint64(total) - return h.Compact(0) + return nil } -func newFloatHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]float64, fhBase *histogram.FloatHistogram) *histogram.FloatHistogram { - fh := fhBase.Copy() - var prevCount, total float64 - for i, le := range upperBounds { - currCount, exists := bucketCounts[le] - if !exists { - currCount = 0 +func (h *TempHistogram) SetCount(count float64) error { + if h.err != nil { + return h.err + } + if count < 0 { + h.err = fmt.Errorf("%w: count=%g", errNegativeCount, count) + return h.err + } + h.count = count + h.hasCount = true + return nil +} + +func (h *TempHistogram) SetSum(sum float64) error { + if h.err != nil { + return h.err + } + h.sum = sum + return nil +} + +func (h TempHistogram) Convert() (*histogram.Histogram, *histogram.FloatHistogram, error) { + if h.err != nil { + return nil, nil, h.err + } + + if len(h.buckets) == 0 || h.buckets[len(h.buckets)-1].le != math.Inf(1) { + // No +Inf bucket. + if !h.hasCount && len(h.buckets) > 0 { + // No count either, so set count to the last known bucket's count. + h.count = h.buckets[len(h.buckets)-1].count } - count := currCount - prevCount - fh.PositiveBuckets[i] = count - total += count - prevCount = currCount + // Let the last bucket be +Inf with the overall count. 
+ h.buckets = append(h.buckets, tempHistogramBucket{le: math.Inf(1), count: h.count}) } - fh.Sum = histogram.Sum - if histogram.Count != 0 { - total = histogram.Count + + if !h.hasCount { + h.count = h.buckets[len(h.buckets)-1].count + h.hasCount = true } - fh.Count = total - return fh.Compact(0) + + for _, b := range h.buckets { + intCount := int64(math.Round(b.count)) + if b.count != float64(intCount) { + return h.convertToFloatHistogram() + } + } + + intCount := uint64(math.Round(h.count)) + if h.count != float64(intCount) { + return h.convertToFloatHistogram() + } + return h.convertToIntegerHistogram(intCount) +} + +func (h TempHistogram) convertToIntegerHistogram(count uint64) (*histogram.Histogram, *histogram.FloatHistogram, error) { + rh := &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: count, + Sum: h.sum, + PositiveSpans: []histogram.Span{{Length: uint32(len(h.buckets))}}, + PositiveBuckets: make([]int64, len(h.buckets)), + } + + if len(h.buckets) > 1 { + rh.CustomValues = make([]float64, len(h.buckets)-1) // Not storing the last +Inf bucket. + } + + prevCount := int64(0) + prevDelta := int64(0) + for i, b := range h.buckets { + // delta is the actual bucket count as the input is cumulative. 
+ delta := int64(b.count) - prevCount + rh.PositiveBuckets[i] = delta - prevDelta + prevCount = int64(b.count) + prevDelta = delta + if b.le != math.Inf(1) { + rh.CustomValues[i] = b.le + } + } + + if count != uint64(h.buckets[len(h.buckets)-1].count) { + h.err = fmt.Errorf("%w: count=%d != le=%g count=%g", errCountMismatch, count, h.buckets[len(h.buckets)-1].le, h.buckets[len(h.buckets)-1].count) + return nil, nil, h.err + } + + return rh.Compact(2), nil, nil +} + +func (h TempHistogram) convertToFloatHistogram() (*histogram.Histogram, *histogram.FloatHistogram, error) { + rh := &histogram.FloatHistogram{ + Schema: histogram.CustomBucketsSchema, + Count: h.count, + Sum: h.sum, + PositiveSpans: []histogram.Span{{Length: uint32(len(h.buckets))}}, + PositiveBuckets: make([]float64, len(h.buckets)), + } + + if len(h.buckets) > 1 { + rh.CustomValues = make([]float64, len(h.buckets)-1) // Not storing the last +Inf bucket. + } + + prevCount := 0.0 + for i, b := range h.buckets { + rh.PositiveBuckets[i] = b.count - prevCount + prevCount = b.count + if b.le != math.Inf(1) { + rh.CustomValues[i] = b.le + } + } + + if h.count != h.buckets[len(h.buckets)-1].count { + h.err = fmt.Errorf("%w: count=%g != le=%g count=%g", errCountMismatch, h.count, h.buckets[len(h.buckets)-1].le, h.buckets[len(h.buckets)-1].count) + return nil, nil, h.err + } + + return nil, rh.Compact(0), nil } func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels { diff --git a/util/convertnhcb/convertnhcb_test.go b/util/convertnhcb/convertnhcb_test.go new file mode 100644 index 0000000000..7486ac18bb --- /dev/null +++ b/util/convertnhcb/convertnhcb_test.go @@ -0,0 +1,189 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convertnhcb + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/histogram" +) + +func TestNHCBConvert(t *testing.T) { + tests := map[string]struct { + setup func() *TempHistogram + expectedErr error + expectedH *histogram.Histogram + expectedFH *histogram.FloatHistogram + }{ + "empty": { + setup: func() *TempHistogram { + h := NewTempHistogram() + return &h + }, + expectedH: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{}, + PositiveBuckets: []int64{}, + }, + }, + "sum only": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetSum(1000.25) + return &h + }, + expectedH: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Sum: 1000.25, + PositiveSpans: []histogram.Span{}, + PositiveBuckets: []int64{}, + }, + }, + "single integer bucket": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetSum(1000.25) + h.SetBucketCount(0.5, 1000) + return &h + }, + expectedH: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1000, + Sum: 1000.25, + PositiveSpans: []histogram.Span{{Length: 1}}, + PositiveBuckets: []int64{1000}, + CustomValues: []float64{0.5}, + }, + }, + "single float bucket": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetSum(1000.25) + h.SetBucketCount(0.5, 1337.42) + return &h + }, + expectedFH: &histogram.FloatHistogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1337.42, + Sum: 1000.25, + PositiveSpans: 
[]histogram.Span{{Length: 1}}, + PositiveBuckets: []float64{1337.42}, + CustomValues: []float64{0.5}, + }, + }, + "happy case integer bucket": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetCount(1000) + h.SetSum(1000.25) + h.SetBucketCount(0.5, 50) + h.SetBucketCount(1.0, 950) + h.SetBucketCount(math.Inf(1), 1000) + return &h + }, + expectedH: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1000, + Sum: 1000.25, + PositiveSpans: []histogram.Span{{Length: 3}}, + PositiveBuckets: []int64{50, 850, -850}, + CustomValues: []float64{0.5, 1.0}, + }, + }, + "happy case float bucket": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetCount(1000) + h.SetSum(1000.25) + h.SetBucketCount(0.5, 50) + h.SetBucketCount(1.0, 950.5) + h.SetBucketCount(math.Inf(1), 1000) + return &h + }, + expectedFH: &histogram.FloatHistogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1000, + Sum: 1000.25, + PositiveSpans: []histogram.Span{{Length: 3}}, + PositiveBuckets: []float64{50, 900.5, 49.5}, + CustomValues: []float64{0.5, 1.0}, + }, + }, + "non cumulative bucket": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetCount(1000) + h.SetSum(1000.25) + h.SetBucketCount(0.5, 50) + h.SetBucketCount(1.0, 950) + h.SetBucketCount(math.Inf(1), 900) + return &h + }, + expectedErr: errCountNotCumulative, + }, + "negative count": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetCount(-1000) + h.SetSum(1000.25) + h.SetBucketCount(0.5, 50) + h.SetBucketCount(1.0, 950) + h.SetBucketCount(math.Inf(1), 900) + return &h + }, + expectedErr: errNegativeCount, + }, + "mixed order": { + setup: func() *TempHistogram { + h := NewTempHistogram() + h.SetBucketCount(0.5, 50) + h.SetBucketCount(math.Inf(1), 1000) + h.SetBucketCount(1.0, 950) + h.SetCount(1000) + h.SetSum(1000.25) + return &h + }, + expectedH: &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: 1000, + Sum: 1000.25, + 
PositiveSpans: []histogram.Span{{Length: 3}}, + PositiveBuckets: []int64{50, 850, -850}, + CustomValues: []float64{0.5, 1.0}, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + th := test.setup() + h, fh, err := th.Convert() + if test.expectedErr != nil { + require.ErrorIs(t, err, test.expectedErr) + return + } + require.Equal(t, test.expectedH, h) + if h != nil { + require.NoError(t, h.Validate()) + } + require.Equal(t, test.expectedFH, fh) + if fh != nil { + require.NoError(t, fh.Validate()) + } + }) + } +} From 685d6d169f3d3723f72a7d0d4481bded09a614ec Mon Sep 17 00:00:00 2001 From: 3Juhwan <13selfesteem91@naver.com> Date: Sun, 27 Oct 2024 17:43:33 +0900 Subject: [PATCH 0827/1397] refactor: reorder fields in defaultSDConfig initialization for consistency Signed-off-by: 3Juhwan <13selfesteem91@naver.com> --- discovery/http/http.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/discovery/http/http.go b/discovery/http/http.go index 004a5b4ae6..65404694c4 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -41,8 +41,8 @@ import ( var ( // DefaultSDConfig is the default HTTP SD configuration. 
DefaultSDConfig = SDConfig{ - RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, + RefreshInterval: model.Duration(60 * time.Second), } userAgent = fmt.Sprintf("Prometheus/%s", version.Version) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) From e452308e3768825e2f22dfae1e9b9bb80b8c48f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 28 Oct 2024 12:09:23 +0200 Subject: [PATCH 0828/1397] discovery/kubernetes: optimize resolvePodRef MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit resolvePodRef is in a hot path: ``` ROUTINE ======================== github.com/prometheus/prometheus/discovery/kubernetes.(*Endpoints).resolvePodRef in discovery/kubernetes/endpoints.go 2.50TB 2.66TB (flat, cum) 22.28% of Total . . 447:func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { . . 448: if ref == nil || ref.Kind != "Pod" { . . 449: return nil . . 450: } 2.50TB 2.50TB 451: p := &apiv1.Pod{} . . 452: p.Namespace = ref.Namespace . . 453: p.Name = ref.Name . . 454: . 156.31GB 455: obj, exists, err := e.podStore.Get(p) . . 456: if err != nil { . . 457: level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) . . 458: return nil . . 459: } . . 460: if !exists { ``` This is some low hanging fruit that we can easily optimize. The key of an object has format "namespace/name" so generate that inside of Prometheus itself and use pooling. 
``` goos: linux goarch: amd64 pkg: github.com/prometheus/prometheus/discovery/kubernetes cpu: Intel(R) Core(TM) i9-10885H CPU @ 2.40GHz │ olddisc │ newdisc │ │ sec/op │ sec/op vs base │ ResolvePodRef-16 516.3n ± 17% 289.5n ± 7% -43.92% (p=0.000 n=10) │ olddisc │ newdisc │ │ B/op │ B/op vs base │ ResolvePodRef-16 1168.00 ± 0% 24.00 ± 0% -97.95% (p=0.000 n=10) │ olddisc │ newdisc │ │ allocs/op │ allocs/op vs base │ ResolvePodRef-16 2.000 ± 0% 2.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` Signed-off-by: Giedrius Statkevičius --- discovery/kubernetes/endpoints.go | 32 +++++++++++++++++++++++++- discovery/kubernetes/endpoints_test.go | 21 +++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 5ba9df6276..934f37ee45 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -20,6 +20,8 @@ import ( "log/slog" "net" "strconv" + "strings" + "sync" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -453,15 +455,43 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { return tg } +var objKeyPool = sync.Pool{} + +func generateObjKey(namespace, name string) (string, *strings.Builder) { + var sb *strings.Builder + + b := objKeyPool.Get() + if b == nil { + sb = &strings.Builder{} + } else { + sb = b.(*strings.Builder) + } + + sb.Reset() + if namespace == "" { + _, _ = sb.WriteString(name) + return sb.String(), sb + } + + _, _ = sb.WriteString(namespace) + _, _ = sb.WriteRune('/') + _, _ = sb.WriteString(name) + return sb.String(), sb +} + func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } + p := &apiv1.Pod{} p.Namespace = ref.Namespace p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + key, sb := generateObjKey(p.Namespace, p.Name) + defer objKeyPool.Put(sb) + + obj, exists, err := 
e.podStore.GetByKey(key) if err != nil { e.logger.Error("resolving pod ref failed", "err", err) return nil diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 4af6889602..a1ac6e5d48 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -18,10 +18,12 @@ import ( "testing" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -1257,3 +1259,22 @@ func TestEndpointsDiscoverySidecarContainer(t *testing.T) { }, }.Run(t) } + +func BenchmarkResolvePodRef(b *testing.B) { + indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil) + e := &Endpoints{ + podStore: indexer, + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + p := e.resolvePodRef(&v1.ObjectReference{ + Kind: "Pod", + Name: "testpod", + Namespace: "foo", + }) + require.Nil(b, p) + } +} From 716fd5b11f6f6f414bbc602b9724b028140006f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 28 Oct 2024 16:19:56 +0200 Subject: [PATCH 0829/1397] discovery/kubernetes: use namespacedName MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Giedrius Statkevičius --- discovery/kubernetes/endpoints.go | 48 +++---------------------------- 1 file changed, 4 insertions(+), 44 deletions(-) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 934f37ee45..14d3bc7a99 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -20,8 +20,6 @@ import ( "log/slog" "net" "strconv" - "strings" - "sync" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -104,10 +102,7 @@ func 
NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node return } - ep := &apiv1.Endpoints{} - ep.Namespace = svc.Namespace - ep.Name = svc.Name - obj, exists, err := e.endpointsStore.Get(ep) + obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name)) if exists && err == nil { e.enqueue(obj.(*apiv1.Endpoints)) } @@ -455,43 +450,12 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { return tg } -var objKeyPool = sync.Pool{} - -func generateObjKey(namespace, name string) (string, *strings.Builder) { - var sb *strings.Builder - - b := objKeyPool.Get() - if b == nil { - sb = &strings.Builder{} - } else { - sb = b.(*strings.Builder) - } - - sb.Reset() - if namespace == "" { - _, _ = sb.WriteString(name) - return sb.String(), sb - } - - _, _ = sb.WriteString(namespace) - _, _ = sb.WriteRune('/') - _, _ = sb.WriteString(name) - return sb.String(), sb -} - func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - - key, sb := generateObjKey(p.Namespace, p.Name) - defer objKeyPool.Put(sb) - - obj, exists, err := e.podStore.GetByKey(key) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { e.logger.Error("resolving pod ref failed", "err", err) return nil @@ -503,11 +467,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { } func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { - svc := &apiv1.Service{} - svc.Namespace = ns - svc.Name = name - - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { e.logger.Error("retrieving service failed", "err", err) return @@ -515,7 +475,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { if !exists { return } - svc = 
obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } From 58fedb6b61b7a6918226d8c791be538aa4cc02ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 28 Oct 2024 17:16:37 +0200 Subject: [PATCH 0830/1397] discovery/kubernetes: optimize more gets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Giedrius Statkevičius --- discovery/kubernetes/endpointslice.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 8f58ba3535..45bc43eff9 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -467,11 +467,8 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { e.logger.Error("resolving pod ref failed", "err", err) return nil @@ -484,19 +481,19 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) { var ( - svc = &apiv1.Service{} found bool + name string ) - svc.Namespace = esa.namespace() + ns := esa.namespace() // Every EndpointSlice object has the Service they belong to in the // kubernetes.io/service-name label. 
- svc.Name, found = esa.labels()[esa.labelServiceName()] + name, found = esa.labels()[esa.labelServiceName()] if !found { return } - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { e.logger.Error("retrieving service failed", "err", err) return @@ -504,7 +501,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } From 7cda23ba3270ad305f550d98d42e53b7ccf18907 Mon Sep 17 00:00:00 2001 From: Jack Westbrook Date: Mon, 28 Oct 2024 17:05:10 +0100 Subject: [PATCH 0831/1397] fix(lezer-promql): fix missing types export in package.json (#15161) Signed-off-by: Jack Westbrook --- web/ui/module/lezer-promql/package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 3eadc3a536..0883552c84 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -5,6 +5,7 @@ "main": "dist/index.cjs", "type": "module", "exports": { + "types": "./dist/index.d.ts", "import": "./dist/index.es.js", "require": "./dist/index.cjs" }, From 5408184452b8e612bba51bd49d228e41e2962573 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Mon, 28 Oct 2024 17:10:34 +0100 Subject: [PATCH 0832/1397] react-app: bump version in lock file Signed-off-by: Jan Fajerski --- web/ui/react-app/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 667eb0b375..f8d1cfb3ea 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.55.0-rc.0", + "version": "0.55.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": 
"@prometheus-io/app", - "version": "0.55.0-rc.0", + "version": "0.55.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -24,7 +24,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.55.0-rc.0", + "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From 5571c7dc98ac526e7ebbcf39ae3c6c61a40c437f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 23 Oct 2024 17:00:02 +0100 Subject: [PATCH 0833/1397] FastRegexMatcher: use stack memory for lowercase copy of string Up to 32-byte values this saves garbage, runs faster. For prefixes, only `toLower` the part we need for the map lookup. Split toNormalisedLower into fast and slow paths, to avoid a penalty for the `copy` call in the case where no allocations are done. Signed-off-by: Bryan Boreham --- model/labels/regexp.go | 54 ++++++++++++++++++++++++++----------- model/labels/regexp_test.go | 5 ++-- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index 3df9435194..bfd9034059 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -802,7 +802,7 @@ type equalMultiStringMapMatcher struct { func (m *equalMultiStringMapMatcher) add(s string) { if !m.caseSensitive { - s = toNormalisedLower(s) + s = toNormalisedLower(s, nil) // Don't pass a stack buffer here - it will always escape to heap. 
} m.values[s] = struct{}{} @@ -840,15 +840,24 @@ func (m *equalMultiStringMapMatcher) setMatches() []string { } func (m *equalMultiStringMapMatcher) Matches(s string) bool { - if !m.caseSensitive { - s = toNormalisedLower(s) + if len(m.values) > 0 { + sNorm := s + var a [32]byte + if !m.caseSensitive { + sNorm = toNormalisedLower(s, a[:]) + } + if _, ok := m.values[sNorm]; ok { + return true + } } - if _, ok := m.values[s]; ok { - return true - } if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen { - for _, matcher := range m.prefixes[s[:m.minPrefixLen]] { + prefix := s[:m.minPrefixLen] + var a [32]byte + if !m.caseSensitive { + prefix = toNormalisedLower(s[:m.minPrefixLen], a[:]) + } + for _, matcher := range m.prefixes[prefix] { if matcher.Matches(s) { return true } @@ -859,22 +868,37 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool { // toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert // it to lower case. -func toNormalisedLower(s string) string { - var buf []byte +func toNormalisedLower(s string, a []byte) string { for i := 0; i < len(s); i++ { c := s[i] if c >= utf8.RuneSelf { return strings.Map(unicode.ToLower, norm.NFKD.String(s)) } if 'A' <= c && c <= 'Z' { - if buf == nil { - buf = []byte(s) - } - buf[i] = c + 'a' - 'A' + return toNormalisedLowerSlow(s, i, a) } } - if buf == nil { - return s + return s +} + +// toNormalisedLowerSlow is split from toNormalisedLower because having a call +// to `copy` slows it down even when it is not called. 
+func toNormalisedLowerSlow(s string, i int, a []byte) string { + var buf []byte + if cap(a) > len(s) { + buf = a[:len(s)] + copy(buf, s) + } else { + buf = []byte(s) + } + for ; i < len(s); i++ { + c := s[i] + if c >= utf8.RuneSelf { + return strings.Map(unicode.ToLower, norm.NFKD.String(s)) + } + if 'A' <= c && c <= 'Z' { + buf[i] = c + 'a' - 'A' + } } return yoloString(buf) } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 8df0dbb023..e9745502ff 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -333,7 +333,8 @@ func BenchmarkToNormalizedLower(b *testing.B) { } b.ResetTimer() for n := 0; n < b.N; n++ { - toNormalisedLower(inputs[n%len(inputs)]) + var a [256]byte + toNormalisedLower(inputs[n%len(inputs)], a[:]) } }) } @@ -1390,6 +1391,6 @@ func TestToNormalisedLower(t *testing.T) { "ſſAſſa": "ssassa", } for input, expectedOutput := range testCases { - require.Equal(t, expectedOutput, toNormalisedLower(input)) + require.Equal(t, expectedOutput, toNormalisedLower(input, nil)) } } From 706dcfeecff0d64dc3c4451d7cf59b0a5c5c82f7 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 29 Oct 2024 10:40:46 +0100 Subject: [PATCH 0834/1397] tsdb.CircularExemplarStorage: Avoid racing (#15231) * tsdb.CircularExemplarStorage: Avoid racing --------- Signed-off-by: Arve Knudsen --- tsdb/exemplar.go | 22 +++++++++++----------- tsdb/exemplar_test.go | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 11 deletions(-) diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index d32870f704..31d461bed9 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -152,13 +152,13 @@ func (ce *CircularExemplarStorage) Querier(_ context.Context) (storage.ExemplarQ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*labels.Matcher) ([]exemplar.QueryResult, error) { ret := make([]exemplar.QueryResult, 0) + ce.lock.RLock() + defer ce.lock.RUnlock() + if len(ce.exemplars) == 0 { return ret, nil } - 
ce.lock.RLock() - defer ce.lock.RUnlock() - // Loop through each index entry, which will point us to first/last exemplar for each series. for _, idx := range ce.index { var se exemplar.QueryResult @@ -281,13 +281,13 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { l = 0 } + ce.lock.Lock() + defer ce.lock.Unlock() + if l == int64(len(ce.exemplars)) { return 0 } - ce.lock.Lock() - defer ce.lock.Unlock() - oldBuffer := ce.exemplars oldNextIndex := int64(ce.nextIndex) @@ -349,6 +349,11 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byt } func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { + // TODO(bwplotka): This lock can lock all scrapers, there might high contention on this on scale. + // Optimize by moving the lock to be per series (& benchmark it). + ce.lock.Lock() + defer ce.lock.Unlock() + if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -356,11 +361,6 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp var buf [1024]byte seriesLabels := l.Bytes(buf[:]) - // TODO(bwplotka): This lock can lock all scrapers, there might high contention on this on scale. - // Optimize by moving the lock to be per series (& benchmark it). - ce.lock.Lock() - defer ce.lock.Unlock() - idx, ok := ce.index[string(seriesLabels)] err := ce.validateExemplar(idx, e, true) if err != nil { diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index 7723ec3894..dbd34cc48c 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -20,6 +20,7 @@ import ( "reflect" "strconv" "strings" + "sync" "testing" "github.com/prometheus/client_golang/prometheus" @@ -499,3 +500,40 @@ func BenchmarkResizeExemplars(b *testing.B) { }) } } + +// TestCircularExemplarStorage_Concurrent_AddExemplar_Resize tries to provoke a data race between AddExemplar and Resize. +// Run with race detection enabled. 
+func TestCircularExemplarStorage_Concurrent_AddExemplar_Resize(t *testing.T) { + exs, err := NewCircularExemplarStorage(0, eMetrics) + require.NoError(t, err) + es := exs.(*CircularExemplarStorage) + + l := labels.FromStrings("service", "asdf") + e := exemplar.Exemplar{ + Labels: labels.FromStrings("trace_id", "qwerty"), + Value: 0.1, + Ts: 1, + } + + var wg sync.WaitGroup + wg.Add(1) + t.Cleanup(wg.Wait) + + started := make(chan struct{}) + + go func() { + defer wg.Done() + + <-started + for i := 0; i < 100; i++ { + require.NoError(t, es.AddExemplar(l, e)) + } + }() + + for i := 0; i < 100; i++ { + es.Resize(int64(i + 1)) + if i == 0 { + close(started) + } + } +} From a44db5f7842a7b3e82687f6fb8d87076a493d858 Mon Sep 17 00:00:00 2001 From: shenpengfeng Date: Tue, 29 Oct 2024 17:58:44 +0800 Subject: [PATCH 0835/1397] chore: fix function name in comment Signed-off-by: shenpengfeng --- storage/buffer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/buffer.go b/storage/buffer.go index ad504ad5db..e847c10e61 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -626,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample { return buf } -// addF adds an hSample to a (specialized) hSample buffer. +// addH adds an hSample to a (specialized) hSample buffer. func addH(s hSample, buf []hSample, r *sampleRing) []hSample { l := len(buf) // Grow the ring buffer if it fits no more elements. 
From b6c538972c4d738896c0ee34fd7d8efcc6e2c2f8 Mon Sep 17 00:00:00 2001 From: Nicolas Takashi Date: Tue, 29 Oct 2024 12:34:02 +0000 Subject: [PATCH 0836/1397] [REFACTORY] simplify appender commit (#15112) * [REFACTOR] simplify appender commit Signed-off-by: Nicolas Takashi Signed-off-by: Arthur Silva Sens Co-authored-by: George Krajcsovits Co-authored-by: Arthur Silva Sens --- tsdb/head_append.go | 852 +++++++++++++++++++++------------------- tsdb/head_bench_test.go | 87 ++++ 2 files changed, 538 insertions(+), 401 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 9c732990bf..603b96cfcc 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -984,23 +984,38 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { return ret } -// Commit writes to the WAL and adds the data to the Head. -// TODO(codesome): Refactor this method to reduce indentation and make it more readable. -func (a *headAppender) Commit() (err error) { - if a.closed { - return ErrAppenderClosed - } - defer func() { a.closed = true }() - - if err := a.log(); err != nil { - _ = a.Rollback() // Most likely the same error will happen again. - return fmt.Errorf("write to WAL: %w", err) - } - - if a.head.writeNotified != nil { - a.head.writeNotified.Notify() - } +type appenderCommitContext struct { + floatsAppended int + histogramsAppended int + // Number of samples out of order but accepted: with ooo enabled and within time window. + oooFloatsAccepted int + oooHistogramAccepted int + // Number of samples rejected due to: out of order but OOO support disabled. + floatOOORejected int + histoOOORejected int + // Number of samples rejected due to: out of order but too old (OOO support enabled, but outside time window). + floatTooOldRejected int + histoTooOldRejected int + // Number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled). 
+ floatOOBRejected int + histoOOBRejected int + inOrderMint int64 + inOrderMaxt int64 + oooMinT int64 + oooMaxT int64 + wblSamples []record.RefSample + wblHistograms []record.RefHistogramSample + wblFloatHistograms []record.RefFloatHistogramSample + oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef + oooMmapMarkersCount int + oooRecords [][]byte + oooCapMax int64 + appendChunkOpts chunkOpts + enc record.Encoder +} +// commitExemplars adds all exemplars from headAppender to the head's exemplar storage. +func (a *headAppender) commitExemplars() { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) @@ -1018,6 +1033,396 @@ func (a *headAppender) Commit() (err error) { a.head.logger.Debug("Unknown error while adding exemplar", "err", err) } } +} + +func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { + if a.head.wbl == nil { + // WBL is not enabled. So no need to collect. + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil + acc.oooMmapMarkersCount = 0 + return + } + + // The m-map happens before adding a new sample. So we collect + // the m-map markers first, and then samples. 
+ // WBL Graphically: + // WBL Before this Commit(): [old samples before this commit for chunk 1] + // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] + if acc.oooMmapMarkers != nil { + markers := make([]record.RefMmapMarker, 0, acc.oooMmapMarkersCount) + for ref, mmapRefs := range acc.oooMmapMarkers { + for _, mmapRef := range mmapRefs { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) + } + } + r := acc.enc.MmapMarkers(markers, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + + if len(acc.wblSamples) > 0 { + r := acc.enc.Samples(acc.wblSamples, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblHistograms) > 0 { + r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblFloatHistograms) > 0 { + r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil +} + +// handleAppendableError processes errors encountered during sample appending and updates +// the provided counters accordingly. +// +// Parameters: +// - err: The error encountered during appending. +// - appended: Pointer to the counter tracking the number of successfully appended samples. +// - oooRejected: Pointer to the counter tracking the number of out-of-order samples rejected. +// - oobRejected: Pointer to the counter tracking the number of out-of-bounds samples rejected. +// - tooOldRejected: Pointer to the counter tracking the number of too-old samples rejected. 
+func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOldRejected *int) { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + *appended-- + *oooRejected++ + case errors.Is(err, storage.ErrOutOfBounds): + *appended-- + *oobRejected++ + case errors.Is(err, storage.ErrTooOldSample): + *appended-- + *tooOldRejected++ + default: + *appended-- + } +} + +// commitSamples processes and commits the samples in the headAppender to the series. +// It handles both in-order and out-of-order samples, updating the appenderCommitContext +// with the results of the append operations. +// +// The function iterates over the samples in the headAppender and attempts to append each sample +// to its corresponding series. It handles various error cases such as out-of-order samples, +// out-of-bounds samples, and too-old samples, updating the appenderCommitContext accordingly. +// +// For out-of-order samples, it checks if the sample can be inserted into the series and updates +// the out-of-order mmap markers if necessary. It also updates the write-ahead log (WBL) samples +// and the minimum and maximum timestamps for out-of-order samples. +// +// For in-order samples, it attempts to append the sample to the series and updates the minimum +// and maximum timestamps for in-order samples. +// +// The function also increments the chunk metrics if a new chunk is created and performs cleanup +// operations on the series after appending the samples. +// +// There are also specific functions to commit histograms and float histograms. 
+func (a *headAppender) commitSamples(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries + + for i, s := range a.samples { + series = a.sampleSeries[i] + series.Lock() + + oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) + if err != nil { + handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected) + } + + switch { + case err != nil: + // Do nothing here. + case oooSample: + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRefs []chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) + if chunkCreated { + r, ok := acc.oooMmapMarkers[series.ref] + if !ok || r != nil { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. + + // r != nil means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + acc.collectOOORecords(a) + } + + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + } + if len(mmapRefs) > 0 { + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) + } else { + // No chunk was written to disk, so we need to set an initial marker for this series. + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ + } + } + if ok { + acc.wblSamples = append(acc.wblSamples, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T + } + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T + } + acc.oooFloatsAccepted++ + } else { + // Sample is an exact duplicate of the last sample. 
+ // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. + acc.floatsAppended-- + } + default: + ok, chunkCreated = series.append(s.T, s.V, a.appendID, acc.appendChunkOpts) + if ok { + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T + } + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T + } + } else { + // The sample is an exact duplicate, and should be silently dropped. + acc.floatsAppended-- + } + } + + if chunkCreated { + a.head.metrics.chunks.Inc() + a.head.metrics.chunksCreated.Inc() + } + + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + } +} + +// For details on the commitHistograms function, see the commitSamples docs. +func (a *headAppender) commitHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries + + for i, s := range a.histograms { + series = a.histogramSeries[i] + series.Lock() + + oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) + } + + switch { + case err != nil: + // Do nothing here. + case oooSample: + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRefs []chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) + if chunkCreated { + r, ok := acc.oooMmapMarkers[series.ref] + if !ok || r != nil { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. 
+ + // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + acc.collectOOORecords(a) + } + + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + } + if len(mmapRefs) > 0 { + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) + } else { + // No chunk was written to disk, so we need to set an initial marker for this series. + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ + } + } + if ok { + acc.wblHistograms = append(acc.wblHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T + } + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T + } + acc.oooHistogramAccepted++ + } else { + // Sample is an exact duplicate of the last sample. + // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. + acc.histogramsAppended-- + } + default: + ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, acc.appendChunkOpts) + if ok { + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T + } + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T + } + } else { + acc.histogramsAppended-- + acc.histoOOORejected++ + } + } + + if chunkCreated { + a.head.metrics.chunks.Inc() + a.head.metrics.chunksCreated.Inc() + } + + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + } +} + +// For details on the commitFloatHistograms function, see the commitSamples docs. 
+func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries + + for i, s := range a.floatHistograms { + series = a.floatHistogramSeries[i] + series.Lock() + + oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) + } + + switch { + case err != nil: + // Do nothing here. + case oooSample: + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRefs []chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) + if chunkCreated { + r, ok := acc.oooMmapMarkers[series.ref] + if !ok || r != nil { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. + + // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + acc.collectOOORecords(a) + } + + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + } + if len(mmapRefs) > 0 { + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) + } else { + // No chunk was written to disk, so we need to set an initial marker for this series. 
+ acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ + } + } + if ok { + acc.wblFloatHistograms = append(acc.wblFloatHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T + } + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T + } + acc.oooHistogramAccepted++ + } else { + // Sample is an exact duplicate of the last sample. + // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. + acc.histogramsAppended-- + } + default: + ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, acc.appendChunkOpts) + if ok { + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T + } + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T + } + } else { + acc.histogramsAppended-- + acc.histoOOORejected++ + } + } + + if chunkCreated { + a.head.metrics.chunks.Inc() + a.head.metrics.chunksCreated.Inc() + } + + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + } +} + +// commitMetadata commits the metadata for each series in the headAppender. +// It iterates over the metadata slice and updates the corresponding series +// with the new metadata information. The series is locked during the update +// to ensure thread safety. +func (a *headAppender) commitMetadata() { + var series *memSeries + for i, m := range a.metadata { + series = a.metadataSeries[i] + series.Lock() + series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} + series.Unlock() + } +} + +// Commit writes to the WAL and adds the data to the Head. +// TODO(codesome): Refactor this method to reduce indentation and make it more readable. 
+func (a *headAppender) Commit() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() + + if err := a.log(); err != nil { + _ = a.Rollback() // Most likely the same error will happen again. + return fmt.Errorf("write to WAL: %w", err) + } + + if a.head.writeNotified != nil { + a.head.writeNotified.Notify() + } + + a.commitExemplars() defer a.head.metrics.activeAppenders.Dec() defer a.head.putAppendBuffer(a.samples) @@ -1028,401 +1433,46 @@ func (a *headAppender) Commit() (err error) { defer a.head.putMetadataBuffer(a.metadata) defer a.head.iso.closeAppend(a.appendID) - var ( - floatsAppended = len(a.samples) - histogramsAppended = len(a.histograms) + len(a.floatHistograms) - // number of samples out of order but accepted: with ooo enabled and within time window - oooFloatsAccepted int - oooHistogramAccepted int - // number of samples rejected due to: out of order but OOO support disabled. - floatOOORejected int - histoOOORejected int - // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside time window) - floatTooOldRejected int - histoTooOldRejected int - // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) - floatOOBRejected int - histoOOBRejected int - inOrderMint int64 = math.MaxInt64 - inOrderMaxt int64 = math.MinInt64 - oooMinT int64 = math.MaxInt64 - oooMaxT int64 = math.MinInt64 - wblSamples []record.RefSample - wblHistograms []record.RefHistogramSample - wblFloatHistograms []record.RefFloatHistogramSample - oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef - oooMmapMarkersCount int - oooRecords [][]byte - oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - series *memSeries - appendChunkOpts = chunkOpts{ + acc := &appenderCommitContext{ + floatsAppended: len(a.samples), + histogramsAppended: len(a.histograms) + len(a.floatHistograms), + inOrderMint: math.MaxInt64, + inOrderMaxt: math.MinInt64, + 
oooMinT: math.MaxInt64, + oooMaxT: math.MinInt64, + oooCapMax: a.head.opts.OutOfOrderCapMax.Load(), + appendChunkOpts: chunkOpts{ chunkDiskMapper: a.head.chunkDiskMapper, chunkRange: a.head.chunkRange.Load(), samplesPerChunk: a.head.opts.SamplesPerChunk, - } - enc record.Encoder - ) + }, + } + defer func() { - for i := range oooRecords { - a.head.putBytesBuffer(oooRecords[i][:0]) + for i := range acc.oooRecords { + a.head.putBytesBuffer(acc.oooRecords[i][:0]) } }() - collectOOORecords := func() { - if a.head.wbl == nil { - // WBL is not enabled. So no need to collect. - wblSamples = nil - wblHistograms = nil - wblFloatHistograms = nil - oooMmapMarkers = nil - oooMmapMarkersCount = 0 - return - } - // The m-map happens before adding a new sample. So we collect - // the m-map markers first, and then samples. - // WBL Graphically: - // WBL Before this Commit(): [old samples before this commit for chunk 1] - // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] - if oooMmapMarkers != nil { - markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount) - for ref, mmapRefs := range oooMmapMarkers { - for _, mmapRef := range mmapRefs { - markers = append(markers, record.RefMmapMarker{ - Ref: ref, - MmapRef: mmapRef, - }) - } - } - r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblSamples) > 0 { - r := enc.Samples(wblSamples, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblHistograms) > 0 { - r := enc.HistogramSamples(wblHistograms, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblFloatHistograms) > 0 { - r := enc.FloatHistogramSamples(wblFloatHistograms, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } + a.commitSamples(acc) + a.commitHistograms(acc) + a.commitFloatHistograms(acc) + a.commitMetadata() - wblSamples = 
nil - wblHistograms = nil - wblFloatHistograms = nil - oooMmapMarkers = nil - } - for i, s := range a.samples { - series = a.sampleSeries[i] - series.Lock() + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) + a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) - oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - floatsAppended-- - floatOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - floatsAppended-- - floatOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - floatsAppended-- - floatTooOldRejected++ - default: - floatsAppended-- - } - - var ok, chunkCreated bool - - switch { - case err != nil: - // Do nothing here. - case oooSample: - // Sample is OOO and OOO handling is enabled - // and the delta is within the OOO tolerance. 
- var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) - if chunkCreated { - r, ok := oooMmapMarkers[series.ref] - if !ok || r != nil { - // !ok means there are no markers collected for these samples yet. So we first flush the samples - // before setting this m-map marker. - - // r != nil means we have already m-mapped a chunk for this series in the same Commit(). - // Hence, before we m-map again, we should add the samples and m-map markers - // seen till now to the WBL records. - collectOOORecords() - } - - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) - } - if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) - } else { - // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ - } - } - if ok { - wblSamples = append(wblSamples, s) - if s.T < oooMinT { - oooMinT = s.T - } - if s.T > oooMaxT { - oooMaxT = s.T - } - oooFloatsAccepted++ - } else { - // Sample is an exact duplicate of the last sample. - // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, - // not with samples in already flushed OOO chunks. - // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - floatsAppended-- - } - default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts) - if ok { - if s.T < inOrderMint { - inOrderMint = s.T - } - if s.T > inOrderMaxt { - inOrderMaxt = s.T - } - } else { - // The sample is an exact duplicate, and should be silently dropped. 
- floatsAppended-- - } - } - - if chunkCreated { - a.head.metrics.chunks.Inc() - a.head.metrics.chunksCreated.Inc() - } - - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - } - - for i, s := range a.histograms { - series = a.histogramSeries[i] - series.Lock() - - oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - histogramsAppended-- - histoOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - histogramsAppended-- - histoOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - histogramsAppended-- - histoTooOldRejected++ - default: - histogramsAppended-- - } - - var ok, chunkCreated bool - - switch { - case err != nil: - // Do nothing here. - case oooSample: - // Sample is OOO and OOO handling is enabled - // and the delta is within the OOO tolerance. - var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) - if chunkCreated { - r, ok := oooMmapMarkers[series.ref] - if !ok || r != nil { - // !ok means there are no markers collected for these samples yet. So we first flush the samples - // before setting this m-map marker. - - // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). - // Hence, before we m-map again, we should add the samples and m-map markers - // seen till now to the WBL records. - collectOOORecords() - } - - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) - } - if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) - } else { - // No chunk was written to disk, so we need to set an initial marker for this series. 
- oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ - } - } - if ok { - wblHistograms = append(wblHistograms, s) - if s.T < oooMinT { - oooMinT = s.T - } - if s.T > oooMaxT { - oooMaxT = s.T - } - oooHistogramAccepted++ - } else { - // Sample is an exact duplicate of the last sample. - // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, - // not with samples in already flushed OOO chunks. - // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - histogramsAppended-- - } - default: - ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts) - if ok { - if s.T < inOrderMint { - inOrderMint = s.T - } - if s.T > inOrderMaxt { - inOrderMaxt = s.T - } - } else { - histogramsAppended-- - histoOOORejected++ - } - } - - if chunkCreated { - a.head.metrics.chunks.Inc() - a.head.metrics.chunksCreated.Inc() - } - - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - } - - for i, s := range a.floatHistograms { - series = a.floatHistogramSeries[i] - series.Lock() - - oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - histogramsAppended-- - histoOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - histogramsAppended-- - histoOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - histogramsAppended-- - histoTooOldRejected++ - default: - histogramsAppended-- - } - - var ok, chunkCreated bool - - switch { - case err != nil: - // Do nothing here. - case oooSample: - // Sample is OOO and OOO handling is enabled - // and the delta is within the OOO tolerance. 
- var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, oooCapMax, a.head.logger) - if chunkCreated { - r, ok := oooMmapMarkers[series.ref] - if !ok || r != nil { - // !ok means there are no markers collected for these samples yet. So we first flush the samples - // before setting this m-map marker. - - // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). - // Hence, before we m-map again, we should add the samples and m-map markers - // seen till now to the WBL records. - collectOOORecords() - } - - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) - } - if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) - } else { - // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ - } - } - if ok { - wblFloatHistograms = append(wblFloatHistograms, s) - if s.T < oooMinT { - oooMinT = s.T - } - if s.T > oooMaxT { - oooMaxT = s.T - } - oooHistogramAccepted++ - } else { - // Sample is an exact duplicate of the last sample. - // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, - // not with samples in already flushed OOO chunks. - // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. 
- histogramsAppended-- - } - default: - ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts) - if ok { - if s.T < inOrderMint { - inOrderMint = s.T - } - if s.T > inOrderMaxt { - inOrderMaxt = s.T - } - } else { - histogramsAppended-- - histoOOORejected++ - } - } - - if chunkCreated { - a.head.metrics.chunks.Inc() - a.head.metrics.chunksCreated.Inc() - } - - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - } - - for i, m := range a.metadata { - series = a.metadataSeries[i] - series.Lock() - series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} - series.Unlock() - } - - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOORejected)) - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected)) - a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOBRejected)) - a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(oooHistogramAccepted)) - a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) - a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) - - collectOOORecords() + acc.collectOOORecords(a) if a.head.wbl != nil { - if err := a.head.wbl.Log(oooRecords...); err != nil { + if err := a.head.wbl.Log(acc.oooRecords...); err != nil { // TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging // until we have found what 
samples become OOO. We can try having a metric for this failure. // Returning the error here is not correct because we have already put the samples into the memory, diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index 51de50ec27..aa2cf22148 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -14,15 +14,22 @@ package tsdb import ( + "context" "errors" + "fmt" + "math/rand" "strconv" "testing" "github.com/stretchr/testify/require" "go.uber.org/atomic" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/wlog" ) func BenchmarkHeadStripeSeriesCreate(b *testing.B) { @@ -79,6 +86,86 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) { } } +func BenchmarkHead_WalCommit(b *testing.B) { + seriesCounts := []int{100, 1000, 10000} + series := genSeries(10000, 10, 0, 0) // Only using the generated labels. + + appendSamples := func(b *testing.B, app storage.Appender, seriesCount int, ts int64) { + var err error + for i, s := range series[:seriesCount] { + var ref storage.SeriesRef + // if i is even, append a sample, else append a histogram. 
+ if i%2 == 0 { + ref, err = app.Append(ref, s.Labels(), ts, float64(ts)) + } else { + h := &histogram.Histogram{ + Count: 7 + uint64(ts*5), + ZeroCount: 2 + uint64(ts), + ZeroThreshold: 0.001, + Sum: 18.4 * rand.Float64(), + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{ts + 1, 1, -1, 0}, + } + ref, err = app.AppendHistogram(ref, s.Labels(), ts, h, nil) + } + require.NoError(b, err) + + _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{ + Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())), + Value: rand.Float64(), + Ts: ts, + }) + require.NoError(b, err) + } + } + + for _, seriesCount := range seriesCounts { + b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) { + for _, commits := range []int64{1, 2} { // To test commits that create new series and when the series already exists. + b.Run(fmt.Sprintf("%d commits", commits), func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + h, w := newTestHead(b, 10000, wlog.CompressionNone, false) + b.Cleanup(func() { + if h != nil { + h.Close() + } + if w != nil { + w.Close() + } + }) + app := h.Appender(context.Background()) + + appendSamples(b, app, seriesCount, 0) + + b.StartTimer() + require.NoError(b, app.Commit()) + if commits == 2 { + b.StopTimer() + app = h.Appender(context.Background()) + appendSamples(b, app, seriesCount, 1) + b.StartTimer() + require.NoError(b, app.Commit()) + } + b.StopTimer() + h.Close() + h = nil + w.Close() + w = nil + } + }) + } + }) + } +} + type failingSeriesLifecycleCallback struct{} func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") } From e2f55c34c9d68347fa9a095f480290473c557c07 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Tue, 29 Oct 2024 14:41:50 +0100 Subject: [PATCH 0837/1397] fix CHANGELOG formatting and add entry for #14694 Signed-off-by: Jan Fajerski --- 
CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab454a9fcb..f71b701abe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,9 @@ * [CHANGE] Disallow configuring AM with the v1 api. #13883 * [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 * [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 -- [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 -- [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 +* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 +* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 +* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 ## 3.0.0-beta.1 / 2024-10-09 From ba11a55df4a8fd4db4150444296cc411566cfd5a Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 29 Oct 2024 17:00:29 +0100 Subject: [PATCH 0838/1397] Revert "Process `MemPostings.Delete()` with `GOMAXPROCS` workers" Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 83 ++++++++-------------------------- tsdb/index/postings_test.go | 88 ++++++++++++------------------------- 2 files changed, 46 insertions(+), 125 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 7bc5629ac5..58f3473dae 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -26,7 +26,6 @@ import ( "sync" "github.com/bboreham/go-loser" - "github.com/cespare/xxhash/v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -293,76 +292,30 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) { p.mtx.Lock() defer p.mtx.Unlock() - if len(p.m) == 0 || 
len(deleted) == 0 { - return + + process := func(l labels.Label) { + orig := p.m[l.Name][l.Value] + repl := make([]storage.SeriesRef, 0, len(orig)) + for _, id := range orig { + if _, ok := deleted[id]; !ok { + repl = append(repl, id) + } + } + if len(repl) > 0 { + p.m[l.Name][l.Value] = repl + } else { + delete(p.m[l.Name], l.Value) + // Delete the key if we removed all values. + if len(p.m[l.Name]) == 0 { + delete(p.m, l.Name) + } + } } - // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it. - deleteLabelNames := make(chan string, len(p.m)) - - process, wait := processWithBoundedParallelismAndConsistentWorkers( - runtime.GOMAXPROCS(0), - func(l labels.Label) uint64 { return xxhash.Sum64String(l.Name) }, - func(l labels.Label) { - orig := p.m[l.Name][l.Value] - repl := make([]storage.SeriesRef, 0, len(orig)) - for _, id := range orig { - if _, ok := deleted[id]; !ok { - repl = append(repl, id) - } - } - if len(repl) > 0 { - p.m[l.Name][l.Value] = repl - } else { - delete(p.m[l.Name], l.Value) - if len(p.m[l.Name]) == 0 { - // Delete the key if we removed all values. - deleteLabelNames <- l.Name - } - } - }, - ) - for l := range affected { process(l) } process(allPostingsKey) - wait() - - // Close deleteLabelNames channel and delete the label names requested. - close(deleteLabelNames) - for name := range deleteLabelNames { - delete(p.m, name) - } -} - -// processWithBoundedParallelismAndConsistentWorkers will call f() with bounded parallelism, -// making sure that elements with same hash(T) will always be processed by the same worker. -// Call process() to add more jobs to process, and once finished adding, call wait() to ensure that all jobs are processed. 
-func processWithBoundedParallelismAndConsistentWorkers[T any](workers int, hash func(T) uint64, f func(T)) (process func(T), wait func()) { - wg := &sync.WaitGroup{} - jobs := make([]chan T, workers) - for i := 0; i < workers; i++ { - wg.Add(1) - jobs[i] = make(chan T, 128) - go func(jobs <-chan T) { - defer wg.Done() - for l := range jobs { - f(l) - } - }(jobs[i]) - } - - process = func(job T) { - jobs[hash(job)%uint64(workers)] <- job - } - wait = func() { - for i := range jobs { - close(jobs[i]) - } - wg.Wait() - } - return process, wait } // Iter calls f for each postings list. It aborts if f returns an error and returns it. diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 8ee9b99434..7d0b717bf0 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -973,69 +973,37 @@ func TestMemPostingsStats(t *testing.T) { } func TestMemPostings_Delete(t *testing.T) { - t.Run("some postings", func(t *testing.T) { - p := NewMemPostings() - p.Add(1, labels.FromStrings("lbl1", "a")) - p.Add(2, labels.FromStrings("lbl1", "b")) - p.Add(3, labels.FromStrings("lbl2", "a")) + p := NewMemPostings() + p.Add(1, labels.FromStrings("lbl1", "a")) + p.Add(2, labels.FromStrings("lbl1", "b")) + p.Add(3, labels.FromStrings("lbl2", "a")) - before := p.Get(allPostingsKey.Name, allPostingsKey.Value) - deletedRefs := map[storage.SeriesRef]struct{}{ - 2: {}, - } - affectedLabels := map[labels.Label]struct{}{ - {Name: "lbl1", Value: "b"}: {}, - } - p.Delete(deletedRefs, affectedLabels) - after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + before := p.Get(allPostingsKey.Name, allPostingsKey.Value) + deletedRefs := map[storage.SeriesRef]struct{}{ + 2: {}, + } + affectedLabels := map[labels.Label]struct{}{ + {Name: "lbl1", Value: "b"}: {}, + } + p.Delete(deletedRefs, affectedLabels) + after := p.Get(allPostingsKey.Name, allPostingsKey.Value) - // Make sure postings gotten before the delete have the old data when - // iterated over. 
- expanded, err := ExpandPostings(before) - require.NoError(t, err) - require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) + // Make sure postings gotten before the delete have the old data when + // iterated over. + expanded, err := ExpandPostings(before) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{1, 2, 3}, expanded) - // Make sure postings gotten after the delete have the new data when - // iterated over. - expanded, err = ExpandPostings(after) - require.NoError(t, err) - require.Equal(t, []storage.SeriesRef{1, 3}, expanded) + // Make sure postings gotten after the delete have the new data when + // iterated over. + expanded, err = ExpandPostings(after) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{1, 3}, expanded) - deleted := p.Get("lbl1", "b") - expanded, err = ExpandPostings(deleted) - require.NoError(t, err) - require.Empty(t, expanded, "expected empty postings, got %v", expanded) - }) - - t.Run("all postings", func(t *testing.T) { - p := NewMemPostings() - p.Add(1, labels.FromStrings("lbl1", "a")) - p.Add(2, labels.FromStrings("lbl1", "b")) - p.Add(3, labels.FromStrings("lbl2", "a")) - - deletedRefs := map[storage.SeriesRef]struct{}{1: {}, 2: {}, 3: {}} - affectedLabels := map[labels.Label]struct{}{ - {Name: "lbl1", Value: "a"}: {}, - {Name: "lbl1", Value: "b"}: {}, - {Name: "lbl1", Value: "c"}: {}, - } - p.Delete(deletedRefs, affectedLabels) - after := p.Get(allPostingsKey.Name, allPostingsKey.Value) - expanded, err := ExpandPostings(after) - require.NoError(t, err) - require.Empty(t, expanded) - }) - - t.Run("nothing on empty mempostings", func(t *testing.T) { - p := NewMemPostings() - deletedRefs := map[storage.SeriesRef]struct{}{} - affectedLabels := map[labels.Label]struct{}{} - p.Delete(deletedRefs, affectedLabels) - after := p.Get(allPostingsKey.Name, allPostingsKey.Value) - expanded, err := ExpandPostings(after) - require.NoError(t, err) - require.Empty(t, expanded) - }) + deleted := p.Get("lbl1", "b") + 
expanded, err = ExpandPostings(deleted) + require.NoError(t, err) + require.Empty(t, expanded, "expected empty postings, got %v", expanded) } // BenchmarkMemPostings_Delete is quite heavy, so consider running it with @@ -1057,7 +1025,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { return s } - const total = 2e6 + const total = 1e6 allSeries := [total]labels.Labels{} nameValues := make([]string, 0, 100) for i := 0; i < total; i++ { From c861b31b72b6716d166b74b2701a22a528342c2a Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 29 Oct 2024 19:40:12 +0100 Subject: [PATCH 0839/1397] Support UTF-8 metric names and labels in web UI Fixes most of https://github.com/prometheus/prometheus/issues/15202 This should address all areas of the UI except for the autocompletion in the codemirror-promql text editor. The strategy here is that any time we print or internally serialize (like for the PromLens tree view) either a metric name or a label name as part of a selector or in other relevant parts of PromQL, we check whether it contains characters beyond what was previously supported, and if so, quote and escape it. In the case of metric names, we also have to move them from the beginning of the selector into the curly braces. 
Signed-off-by: Julius Volz --- .../mantine-ui/src/components/LabelBadges.tsx | 3 +- web/ui/mantine-ui/src/lib/formatSeries.ts | 14 ++- .../mantine-ui/src/pages/query/SeriesName.tsx | 39 +++++-- web/ui/mantine-ui/src/promql/format.tsx | 63 +++++++--- web/ui/mantine-ui/src/promql/serialize.ts | 44 ++++--- .../src/promql/serializeAndFormat.test.ts | 109 +++++++++++++++++- web/ui/mantine-ui/src/promql/utils.ts | 15 +++ 7 files changed, 242 insertions(+), 45 deletions(-) diff --git a/web/ui/mantine-ui/src/components/LabelBadges.tsx b/web/ui/mantine-ui/src/components/LabelBadges.tsx index f60a37f032..8aa7135564 100644 --- a/web/ui/mantine-ui/src/components/LabelBadges.tsx +++ b/web/ui/mantine-ui/src/components/LabelBadges.tsx @@ -2,6 +2,7 @@ import { Badge, BadgeVariant, Group, MantineColor, Stack } from "@mantine/core"; import { FC } from "react"; import { escapeString } from "../lib/escapeString"; import badgeClasses from "../Badge.module.css"; +import { maybeQuoteLabelName } from "../promql/utils"; export interface LabelBadgesProps { labels: Record; @@ -30,7 +31,7 @@ export const LabelBadges: FC = ({ }} key={k} > - {k}="{escapeString(v)}" + {maybeQuoteLabelName(k)}="{escapeString(v)}" ); })} diff --git a/web/ui/mantine-ui/src/lib/formatSeries.ts b/web/ui/mantine-ui/src/lib/formatSeries.ts index b79c40076d..0076590706 100644 --- a/web/ui/mantine-ui/src/lib/formatSeries.ts +++ b/web/ui/mantine-ui/src/lib/formatSeries.ts @@ -1,12 +1,24 @@ +import { + maybeQuoteLabelName, + metricContainsExtendedCharset, +} from "../promql/utils"; import { escapeString } from "./escapeString"; +// TODO: Maybe replace this with the new PromLens-derived serialization code in src/promql/serialize.ts? 
export const formatSeries = (labels: { [key: string]: string }): string => { if (labels === null) { return "scalar"; } + if (metricContainsExtendedCharset(labels.__name__ || "")) { + return `{"${escapeString(labels.__name__)}",${Object.entries(labels) + .filter(([k]) => k !== "__name__") + .map(([k, v]) => `${maybeQuoteLabelName(k)}="${escapeString(v)}"`) + .join(", ")}}`; + } + return `${labels.__name__ || ""}{${Object.entries(labels) .filter(([k]) => k !== "__name__") - .map(([k, v]) => `${k}="${escapeString(v)}"`) + .map(([k, v]) => `${maybeQuoteLabelName(k)}="${escapeString(v)}"`) .join(", ")}}`; }; diff --git a/web/ui/mantine-ui/src/pages/query/SeriesName.tsx b/web/ui/mantine-ui/src/pages/query/SeriesName.tsx index 66a7856f56..61bc62eee3 100644 --- a/web/ui/mantine-ui/src/pages/query/SeriesName.tsx +++ b/web/ui/mantine-ui/src/pages/query/SeriesName.tsx @@ -5,6 +5,10 @@ import classes from "./SeriesName.module.css"; import { escapeString } from "../../lib/escapeString"; import { useClipboard } from "@mantine/hooks"; import { notifications } from "@mantine/notifications"; +import { + maybeQuoteLabelName, + metricContainsExtendedCharset, +} from "../../promql/utils"; interface SeriesNameProps { labels: { [key: string]: string } | null; @@ -15,8 +19,26 @@ const SeriesName: FC = ({ labels, format }) => { const clipboard = useClipboard(); const renderFormatted = (): React.ReactElement => { + const metricExtendedCharset = + labels && metricContainsExtendedCharset(labels.__name__ || ""); + const labelNodes: React.ReactElement[] = []; let first = true; + + // If the metric name uses the extended new charset, we need to escape it, + // put it into the label matcher list, and make sure it's the first item. 
+ if (metricExtendedCharset) { + labelNodes.push( + + + "{escapeString(labels.__name__)}" + + + ); + + first = false; + } + for (const label in labels) { if (label === "__name__") { continue; @@ -37,7 +59,10 @@ const SeriesName: FC = ({ labels, format }) => { }} title="Click to copy label matcher" > - {label}= + + {maybeQuoteLabelName(label)} + + = "{escapeString(labels[label])}" @@ -52,9 +77,11 @@ const SeriesName: FC = ({ labels, format }) => { return ( - - {labels ? labels.__name__ : ""} - + {!metricExtendedCharset && ( + + {labels ? labels.__name__ : ""} + + )} {"{"} {labelNodes} {"}"} @@ -62,10 +89,6 @@ const SeriesName: FC = ({ labels, format }) => { ); }; - if (labels === null) { - return <>scalar; - } - if (format) { return renderFormatted(); } diff --git a/web/ui/mantine-ui/src/promql/format.tsx b/web/ui/mantine-ui/src/promql/format.tsx index 05dd7d410e..3996444085 100644 --- a/web/ui/mantine-ui/src/promql/format.tsx +++ b/web/ui/mantine-ui/src/promql/format.tsx @@ -8,14 +8,21 @@ import ASTNode, { MatrixSelector, } from "./ast"; import { formatPrometheusDuration } from "../lib/formatTime"; -import { maybeParenthesizeBinopChild, escapeString } from "./utils"; +import { + maybeParenthesizeBinopChild, + escapeString, + maybeQuoteLabelName, + metricContainsExtendedCharset, +} from "./utils"; export const labelNameList = (labels: string[]): React.ReactNode[] => { return labels.map((l, i) => { return ( {i !== 0 && ", "} - {l} + + {maybeQuoteLabelName(l)} + ); }); @@ -69,27 +76,45 @@ const formatAtAndOffset = ( const formatSelector = ( node: VectorSelector | MatrixSelector ): ReactElement => { - const matchLabels = node.matchers - .filter( - (m) => - !( - m.name === "__name__" && - m.type === matchType.equal && - m.value === node.name - ) - ) - .map((m, i) => ( - - {i !== 0 && ","} - {m.name} - {m.type} - "{escapeString(m.value)}" + const matchLabels: JSX.Element[] = []; + + // If the metric name contains the new extended charset, we need to escape it + // and 
add it at the beginning of the matchers list in the curly braces. + const metricName = + node.name || + node.matchers.find( + (m) => m.name === "__name__" && m.type === matchType.equal + )?.value || + ""; + const metricExtendedCharset = metricContainsExtendedCharset(metricName); + if (metricExtendedCharset) { + matchLabels.push( + + "{escapeString(metricName)}" - )); + ); + } + + matchLabels.push( + ...node.matchers + .filter((m) => !(m.name === "__name__" && m.type === matchType.equal)) + .map((m, i) => ( + + {(i !== 0 || metricExtendedCharset) && ","} + + {maybeQuoteLabelName(m.name)} + + {m.type} + "{escapeString(m.value)}" + + )) + ); return ( <> - {node.name} + {!metricExtendedCharset && ( + {metricName} + )} {matchLabels.length > 0 && ( <> {"{"} diff --git a/web/ui/mantine-ui/src/promql/serialize.ts b/web/ui/mantine-ui/src/promql/serialize.ts index af9c6ef151..1d2c63f4fc 100644 --- a/web/ui/mantine-ui/src/promql/serialize.ts +++ b/web/ui/mantine-ui/src/promql/serialize.ts @@ -11,8 +11,14 @@ import { aggregatorsWithParam, maybeParenthesizeBinopChild, escapeString, + metricContainsExtendedCharset, + maybeQuoteLabelName, } from "./utils"; +const labelNameList = (labels: string[]): string => { + return labels.map((ln) => maybeQuoteLabelName(ln)).join(", "); +}; + const serializeAtAndOffset = ( timestamp: number | null, startOrEnd: StartOrEnd, @@ -28,15 +34,23 @@ const serializeAtAndOffset = ( const serializeSelector = (node: VectorSelector | MatrixSelector): string => { const matchers = node.matchers - .filter( - (m) => - !( - m.name === "__name__" && - m.type === matchType.equal && - m.value === node.name - ) - ) - .map((m) => `${m.name}${m.type}"${escapeString(m.value)}"`); + .filter((m) => !(m.name === "__name__" && m.type === matchType.equal)) + .map( + (m) => `${maybeQuoteLabelName(m.name)}${m.type}"${escapeString(m.value)}"` + ); + + // If the metric name contains the new extended charset, we need to escape it + // and add it at the beginning of the 
matchers list in the curly braces. + const metricName = + node.name || + node.matchers.find( + (m) => m.name === "__name__" && m.type === matchType.equal + )?.value || + ""; + const metricExtendedCharset = metricContainsExtendedCharset(metricName); + if (metricExtendedCharset) { + matchers.unshift(`"${escapeString(metricName)}"`); + } const range = node.type === nodeType.matrixSelector @@ -48,7 +62,7 @@ const serializeSelector = (node: VectorSelector | MatrixSelector): string => { node.offset ); - return `${node.name}${matchers.length > 0 ? `{${matchers.join(",")}}` : ""}${range}${atAndOffset}`; + return `${!metricExtendedCharset ? metricName : ""}${matchers.length > 0 ? `{${matchers.join(",")}}` : ""}${range}${atAndOffset}`; }; const serializeNode = ( @@ -68,9 +82,9 @@ const serializeNode = ( case nodeType.aggregation: return `${initialInd}${node.op}${ node.without - ? ` without(${node.grouping.join(", ")}) ` + ? ` without(${labelNameList(node.grouping)}) ` : node.grouping.length > 0 - ? ` by(${node.grouping.join(", ")}) ` + ? ` by(${labelNameList(node.grouping)}) ` : "" }(${childListSeparator}${ aggregatorsWithParam.includes(node.op) && node.param !== null @@ -119,16 +133,16 @@ const serializeNode = ( const vm = node.matching; if (vm !== null && (vm.labels.length > 0 || vm.on)) { if (vm.on) { - matching = ` on(${vm.labels.join(", ")})`; + matching = ` on(${labelNameList(vm.labels)})`; } else { - matching = ` ignoring(${vm.labels.join(", ")})`; + matching = ` ignoring(${labelNameList(vm.labels)})`; } if ( vm.card === vectorMatchCardinality.manyToOne || vm.card === vectorMatchCardinality.oneToMany ) { - grouping = ` group_${vm.card === vectorMatchCardinality.manyToOne ? "left" : "right"}(${vm.include.join(",")})`; + grouping = ` group_${vm.card === vectorMatchCardinality.manyToOne ? 
"left" : "right"}(${labelNameList(vm.include)})`; } } diff --git a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts index ea045612c1..a2b97ec904 100644 --- a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts +++ b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts @@ -99,7 +99,7 @@ describe("serializeNode and formatNode", () => { timestamp: null, startOrEnd: null, }, - output: '{__name__="metric_name"} offset 1m', + output: "metric_name offset 1m", }, { // Escaping in label values. @@ -642,6 +642,113 @@ describe("serializeNode and formatNode", () => { == bool on(label1, label2) group_right(label3) …`, }, + // Test new Prometheus 3.0 UTF-8 support. + { + node: { + bool: false, + lhs: { + bool: false, + lhs: { + expr: { + matchers: [ + { + name: "__name__", + type: matchType.equal, + value: "metric_ä", + }, + { + name: "foo", + type: matchType.equal, + value: "bar", + }, + ], + name: "", + offset: 0, + startOrEnd: null, + timestamp: null, + type: nodeType.vectorSelector, + }, + grouping: ["a", "ä"], + op: aggregationType.sum, + param: null, + type: nodeType.aggregation, + without: false, + }, + matching: { + card: vectorMatchCardinality.manyToOne, + include: ["c", "ü"], + labels: ["b", "ö"], + on: true, + }, + op: binaryOperatorType.div, + rhs: { + expr: { + matchers: [ + { + name: "__name__", + type: matchType.equal, + value: "metric_ö", + }, + { + name: "bar", + type: matchType.equal, + value: "foo", + }, + ], + name: "", + offset: 0, + startOrEnd: null, + timestamp: null, + type: nodeType.vectorSelector, + }, + grouping: ["d", "ä"], + op: aggregationType.sum, + param: null, + type: nodeType.aggregation, + without: true, + }, + type: nodeType.binaryExpr, + }, + matching: { + card: vectorMatchCardinality.oneToOne, + include: [], + labels: ["e", "ö"], + on: false, + }, + op: binaryOperatorType.add, + rhs: { + expr: { + matchers: [ + { + name: "__name__", + type: matchType.equal, + value: 
"metric_ü", + }, + ], + name: "", + offset: 0, + startOrEnd: null, + timestamp: null, + type: nodeType.vectorSelector, + }, + type: nodeType.parenExpr, + }, + type: nodeType.binaryExpr, + }, + output: + 'sum by(a, "ä") ({"metric_ä",foo="bar"}) / on(b, "ö") group_left(c, "ü") sum without(d, "ä") ({"metric_ö",bar="foo"}) + ignoring(e, "ö") ({"metric_ü"})', + prettyOutput: ` sum by(a, "ä") ( + {"metric_ä",foo="bar"} + ) + / on(b, "ö") group_left(c, "ü") + sum without(d, "ä") ( + {"metric_ö",bar="foo"} + ) ++ ignoring(e, "ö") + ( + {"metric_ü"} + )`, + }, ]; tests.forEach((t) => { diff --git a/web/ui/mantine-ui/src/promql/utils.ts b/web/ui/mantine-ui/src/promql/utils.ts index 5477ee5968..2f1cc11d2f 100644 --- a/web/ui/mantine-ui/src/promql/utils.ts +++ b/web/ui/mantine-ui/src/promql/utils.ts @@ -267,6 +267,21 @@ export const humanizedValueType: Record = { [valueType.matrix]: "range vector", }; +const metricNameRe = /^[a-zA-Z_:][a-zA-Z0-9_:]*$/; +const labelNameCharsetRe = /^[a-zA-Z_][a-zA-Z0-9_]*$/; + +export const metricContainsExtendedCharset = (str: string) => { + return !metricNameRe.test(str); +}; + +export const labelNameContainsExtendedCharset = (str: string) => { + return !labelNameCharsetRe.test(str); +}; + export const escapeString = (str: string) => { return str.replace(/([\\"])/g, "\\$1"); }; + +export const maybeQuoteLabelName = (str: string) => { + return labelNameContainsExtendedCharset(str) ? 
`"${escapeString(str)}"` : str; +}; From 2e7c739d4475102e894b1ac538dc4e205573158c Mon Sep 17 00:00:00 2001 From: Juraj Michalek Date: Fri, 25 Oct 2024 16:46:00 +0200 Subject: [PATCH 0840/1397] chore: add tcp events to remote store span Signed-off-by: Juraj Michalek --- storage/remote/client.go | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/storage/remote/client.go b/storage/remote/client.go index 62218cfba9..54c8b34fcf 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -18,8 +18,10 @@ import ( "context" "errors" "fmt" + "go.opentelemetry.io/otel/attribute" "io" "net/http" + "net/http/httptrace" "strconv" "strings" "time" @@ -279,7 +281,36 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo ctx, span := otel.Tracer("").Start(ctx, "Remote Store", trace.WithSpanKind(trace.SpanKindClient)) defer span.End() - httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) + httpReqTrace := &httptrace.ClientTrace{ + GetConn: func(hostPort string) { + span.AddEvent("GetConn", trace.WithAttributes(attribute.String("host", hostPort))) + }, + GotConn: func(info httptrace.GotConnInfo) { + span.AddEvent("GotConn", trace.WithAttributes( + attribute.Bool("reused", info.Reused), + attribute.Bool("wasIdle", info.WasIdle), + //attribute.Duration("idleTime", info.IdleTime), + )) + }, + DNSStart: func(info httptrace.DNSStartInfo) { + span.AddEvent("DNSStart", trace.WithAttributes(attribute.String("host", info.Host))) + }, + DNSDone: func(info httptrace.DNSDoneInfo) { + span.AddEvent("DNSDone", trace.WithAttributes(attribute.Bool("coalesced", info.Coalesced))) + }, + ConnectStart: func(network, addr string) { + span.AddEvent("ConnectStart", trace.WithAttributes(attribute.String("network", network), attribute.String("addr", addr))) + }, + ConnectDone: func(network, addr string, err error) { + attrs := []attribute.KeyValue{attribute.String("network", network), attribute.String("addr", addr)} 
+ if err != nil { + attrs = append(attrs, attribute.String("error", err.Error())) + } + span.AddEvent("ConnectDone", trace.WithAttributes(attrs...)) + }, + } + + httpResp, err := c.Client.Do(httpReq.WithContext(httptrace.WithClientTrace(ctx, httpReqTrace))) if err != nil { // Errors from Client.Do are from (for example) network errors, so are // recoverable. From 3c1ffbb2fdb646ff7382d6debfe6771b5590066c Mon Sep 17 00:00:00 2001 From: Juraj Michalek Date: Fri, 25 Oct 2024 17:01:52 +0200 Subject: [PATCH 0841/1397] chore: added idleTimeMs and fixed linting issues Signed-off-by: Juraj Michalek --- storage/remote/client.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/storage/remote/client.go b/storage/remote/client.go index 54c8b34fcf..ad898cd3f0 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -18,7 +18,6 @@ import ( "context" "errors" "fmt" - "go.opentelemetry.io/otel/attribute" "io" "net/http" "net/http/httptrace" @@ -26,6 +25,8 @@ import ( "strings" "time" + "go.opentelemetry.io/otel/attribute" + "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" @@ -289,7 +290,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo span.AddEvent("GotConn", trace.WithAttributes( attribute.Bool("reused", info.Reused), attribute.Bool("wasIdle", info.WasIdle), - //attribute.Duration("idleTime", info.IdleTime), + attribute.Float64("idleTimeMs", float64(info.IdleTime.Milliseconds())), )) }, DNSStart: func(info httptrace.DNSStartInfo) { From 7ecdb55b02345dc7c52f078840c75f9ec65b7c14 Mon Sep 17 00:00:00 2001 From: Juraj Michalek Date: Mon, 28 Oct 2024 16:02:09 +0100 Subject: [PATCH 0842/1397] chore: use otelhttptrace instead Signed-off-by: Juraj Michalek --- go.mod | 9 +++++---- go.sum | 18 ++++++++++-------- storage/remote/client.go | 41 +++++++--------------------------------- 3 files changed, 22 insertions(+), 46 deletions(-) diff --git a/go.mod 
b/go.mod index 6d33d2ed23..3399ffb002 100644 --- a/go.mod +++ b/go.mod @@ -62,13 +62,14 @@ require ( github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/collector/pdata v1.16.0 go.opentelemetry.io/collector/semconv v0.110.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 - go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 + go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 go.opentelemetry.io/otel/sdk v1.30.0 - go.opentelemetry.io/otel/trace v1.30.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 @@ -184,7 +185,7 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect diff --git a/go.sum b/go.sum index 3d415cf344..1dce748ba7 100644 --- a/go.sum +++ b/go.sum @@ -591,22 +591,24 @@ go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gb go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= 
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= diff --git a/storage/remote/client.go b/storage/remote/client.go index ad898cd3f0..d37b1505cb 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -25,8 +25,6 @@ import ( "strings" "time" - "go.opentelemetry.io/otel/attribute" - "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" @@ -34,6 +32,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" + "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" @@ -216,8 +215,11 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { if conf.WriteProtoMsg != "" { writeProtoMsg = conf.WriteProtoMsg } - - httpClient.Transport = otelhttp.NewTransport(t) + httpClient.Transport = otelhttp.NewTransport( + t, + otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { + return otelhttptrace.NewClientTrace(ctx) + })) return 
&Client{ remoteName: name, urlString: conf.URL.String(), @@ -282,36 +284,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo ctx, span := otel.Tracer("").Start(ctx, "Remote Store", trace.WithSpanKind(trace.SpanKindClient)) defer span.End() - httpReqTrace := &httptrace.ClientTrace{ - GetConn: func(hostPort string) { - span.AddEvent("GetConn", trace.WithAttributes(attribute.String("host", hostPort))) - }, - GotConn: func(info httptrace.GotConnInfo) { - span.AddEvent("GotConn", trace.WithAttributes( - attribute.Bool("reused", info.Reused), - attribute.Bool("wasIdle", info.WasIdle), - attribute.Float64("idleTimeMs", float64(info.IdleTime.Milliseconds())), - )) - }, - DNSStart: func(info httptrace.DNSStartInfo) { - span.AddEvent("DNSStart", trace.WithAttributes(attribute.String("host", info.Host))) - }, - DNSDone: func(info httptrace.DNSDoneInfo) { - span.AddEvent("DNSDone", trace.WithAttributes(attribute.Bool("coalesced", info.Coalesced))) - }, - ConnectStart: func(network, addr string) { - span.AddEvent("ConnectStart", trace.WithAttributes(attribute.String("network", network), attribute.String("addr", addr))) - }, - ConnectDone: func(network, addr string, err error) { - attrs := []attribute.KeyValue{attribute.String("network", network), attribute.String("addr", addr)} - if err != nil { - attrs = append(attrs, attribute.String("error", err.Error())) - } - span.AddEvent("ConnectDone", trace.WithAttributes(attrs...)) - }, - } - - httpResp, err := c.Client.Do(httpReq.WithContext(httptrace.WithClientTrace(ctx, httpReqTrace))) + httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) if err != nil { // Errors from Client.Do are from (for example) network errors, so are // recoverable. 
From 76ff12b32a9ec946c4c2241ea5fea148a2f952c2 Mon Sep 17 00:00:00 2001 From: Juraj Michalek Date: Wed, 30 Oct 2024 09:41:16 +0100 Subject: [PATCH 0843/1397] chore: only create span events Signed-off-by: Juraj Michalek --- storage/remote/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/remote/client.go b/storage/remote/client.go index d37b1505cb..23775122e5 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -218,7 +218,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { httpClient.Transport = otelhttp.NewTransport( t, otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { - return otelhttptrace.NewClientTrace(ctx) + return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) })) return &Client{ remoteName: name, From 76ca7d08d9ae9cb23ea0a498f87370a26f16dc1e Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 30 Oct 2024 16:43:10 +0100 Subject: [PATCH 0844/1397] Fixup: re-add erroneously removed lines Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/query/SeriesName.tsx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/web/ui/mantine-ui/src/pages/query/SeriesName.tsx b/web/ui/mantine-ui/src/pages/query/SeriesName.tsx index 61bc62eee3..d03b530f03 100644 --- a/web/ui/mantine-ui/src/pages/query/SeriesName.tsx +++ b/web/ui/mantine-ui/src/pages/query/SeriesName.tsx @@ -89,6 +89,10 @@ const SeriesName: FC = ({ labels, format }) => { ); }; + if (labels === null) { + return <>scalar; + } + if (format) { return renderFormatted(); } From 8588289c246304dd2a736154a6a0904707b02cd2 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 30 Oct 2024 18:07:51 -0300 Subject: [PATCH 0845/1397] otlp translator: Add test showing bugs Signed-off-by: Arthur Silva Sens --- .../prometheus/normalize_label_test.go | 45 +++++++++++++++++++ .../prometheus/normalize_name_test.go | 1 + 2 files changed, 46 insertions(+) create mode 100644 
storage/remote/otlptranslator/prometheus/normalize_label_test.go diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go new file mode 100644 index 0000000000..3ceb8760c5 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/normalize_label_test.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNormalizeLabel(t *testing.T) { + tests := []struct { + label string + expected string + }{ + {"", ""}, + {"label:with:colons", "label_with_colons"}, // Without UTF-8 support, colons are only allowed in metric names + {"LabelWithCapitalLetters", "LabelWithCapitalLetters"}, + {"label!with&special$chars)", "label_with_special_chars_"}, + {"label_with_foreign_characteres_字符", "label_with_foreign_characteres___"}, + {"label.with.dots", "label_with_dots"}, + {"123label", "key_123label"}, + {"_label_starting_with_underscore", "key_label_starting_with_underscore"}, + {"__label_starting_with_2underscores", "__label_starting_with_2underscores"}, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { + result := NormalizeLabel(test.label) + require.Equal(t, test.expected, result) + }) + } +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go 
b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 07b9b0a784..4e55209410 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -202,4 +202,5 @@ func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) + require.Equal(t, "metric_with___foreign_characteres", BuildCompliantName(createCounter("metric_with_字符_foreign_characteres", ""), "", false)) } From ea06f1a1d1ea4571d040e952b626da2e7fa74668 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 30 Oct 2024 19:29:31 -0300 Subject: [PATCH 0846/1397] bugfix: Fix otlp translation of foreign characters Signed-off-by: Arthur Silva Sens --- storage/remote/otlptranslator/prometheus/normalize_label.go | 2 +- storage/remote/otlptranslator/prometheus/normalize_name.go | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index a112b9bbce..c22c761320 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -49,7 +49,7 @@ func NormalizeLabel(label string) string { // Return '_' for anything non-alphanumeric. 
func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { + if unicode.IsLower(r) || unicode.IsUpper(r) || unicode.IsDigit(r) { return r } return '_' diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 0f472b80a0..36b647f510 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -237,11 +237,13 @@ func removeSuffix(tokens []string, suffix string) []string { // Clean up specified string so it's Prometheus compliant func CleanUpString(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) }), "_") } func RemovePromForbiddenRunes(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { + return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) && r != '_' && r != ':' + }), "_") } // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit From b7aca45de7e40b7e99949480bfdfa3e188a0344b Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 31 Oct 2024 00:30:22 -0700 Subject: [PATCH 0847/1397] fix round function ignoring enableDelayedNameRemoval feature flag Signed-off-by: Ben Ye --- promql/engine_test.go | 62 +++++++++++++++++++ promql/functions.go | 3 + .../testdata/name_label_dropping.test | 4 ++ 3 files changed, 69 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index db399d8656..8da27a73c5 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3501,3 +3501,65 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 
count:6 buckets:[3 3]}} {{sum: }, }) } + +func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) { + opts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + EnableAtModifier: true, + MaxSamples: 10000, + Timeout: 10 * time.Second, + EnableDelayedNameRemoval: false, + } + engine := promqltest.NewTestEngineWithOpts(t, opts) + + promqltest.RunTest(t, ` +load 5m + metric{env="1"} 0 60 120 + another_metric{env="1"} 60 120 180 + +# Does not drop __name__ for vector selector +eval instant at 15m metric{env="1"} + metric{env="1"} 120 + +# Drops __name__ for unary operators +eval instant at 15m -metric + {env="1"} -120 + +# Drops __name__ for binary operators +eval instant at 15m metric + another_metric + {env="1"} 300 + +# Does not drop __name__ for binary comparison operators +eval instant at 15m metric <= another_metric + metric{env="1"} 120 + +# Drops __name__ for binary comparison operators with "bool" modifier +eval instant at 15m metric <= bool another_metric + {env="1"} 1 + +# Drops __name__ for vector-scalar operations +eval instant at 15m metric * 2 + {env="1"} 240 + +# Drops __name__ for instant-vector functions +eval instant at 15m clamp(metric, 0, 100) + {env="1"} 100 + +# Drops __name__ for round function +eval instant at 15m round(metric) + {env="1"} 120 + +# Drops __name__ for range-vector functions +eval instant at 15m rate(metric{env="1"}[10m]) + {env="1"} 0.2 + +# Does not drop __name__ for last_over_time function +eval instant at 15m last_over_time(metric{env="1"}[10m]) + metric{env="1"} 120 + +# Drops name for other _over_time functions +eval instant at 15m max_over_time(metric{env="1"}[10m]) + {env="1"} 120 +`, engine) +} diff --git a/promql/functions.go b/promql/functions.go index cc5f19dae7..a509f783fa 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -534,6 +534,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper for _, el := range vec { f := math.Floor(el.F*toNearestInverse+0.5) / 
toNearestInverse + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ Metric: el.Metric, F: f, diff --git a/promql/promqltest/testdata/name_label_dropping.test b/promql/promqltest/testdata/name_label_dropping.test index 1f1dac3602..c8c0eb285f 100644 --- a/promql/promqltest/testdata/name_label_dropping.test +++ b/promql/promqltest/testdata/name_label_dropping.test @@ -31,6 +31,10 @@ eval instant at 15m metric * 2 eval instant at 15m clamp(metric, 0, 100) {env="1"} 100 +# Drops __name__ for round function +eval instant at 15m round(metric) + {env="1"} 120 + # Drops __name__ for range-vector functions eval instant at 15m rate(metric{env="1"}[10m]) {env="1"} 0.2 From 5fdd382ec364d4bfa4e000960253c700f1d32390 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Thu, 31 Oct 2024 16:22:59 +0100 Subject: [PATCH 0848/1397] CHANGELOG: remove duplicate entry Signed-off-by: Jan Fajerski --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f71b701abe..de97354f27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,6 @@ * [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164 * [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178 * [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657 -* [CHANGE] Adopt log/slog and remove go-kit/log. #14906 * [CHANGE] Disallow configuring AM with the v1 api. #13883 * [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. 
#14710 * [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 From 293b57dcb5dd22ba39db1aeb7c28e6a092bd0d75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:26:22 +0000 Subject: [PATCH 0849/1397] chore(deps): bump prometheus/promci from 0.4.4 to 0.4.5 Bumps [prometheus/promci](https://github.com/prometheus/promci) from 0.4.4 to 0.4.5. - [Release notes](https://github.com/prometheus/promci/releases) - [Commits](https://github.com/prometheus/promci/compare/468927c440349ab56c4a1aafd453b312841503c2...52c7012f5f0070d7281b8db4a119e21341d43c91) --- updated-dependencies: - dependency-name: prometheus/promci dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2714211dd7..fcdff2cb5d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment with: enable_npm: true @@ -30,7 +30,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... 
- run: GOARCH=386 go test ./cmd/prometheus @@ -63,7 +63,7 @@ jobs: steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment with: enable_go: false @@ -122,7 +122,7 @@ jobs: thread: [ 0, 1, 2 ] steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" @@ -147,7 +147,7 @@ jobs: # should also be updated. steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/build with: parallelism: 12 @@ -209,7 +209,7 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -226,7 +226,7 @@ jobs: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/publish_release with: 
docker_hub_login: ${{ secrets.docker_hub_login }} @@ -241,7 +241,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4 + - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - name: Install nodejs uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 with: From c2005dbf63cfc7afa8cb6a6c1c59b3c665b376f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:26:48 +0000 Subject: [PATCH 0850/1397] chore(deps): bump google.golang.org/api from 0.199.0 to 0.204.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.199.0 to 0.204.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.199.0...v0.204.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 3399ffb002..ac11ac35e3 100644 --- a/go.mod +++ b/go.mod @@ -79,10 +79,10 @@ require ( golang.org/x/sys v0.26.0 golang.org/x/text v0.19.0 golang.org/x/tools v0.26.0 - google.golang.org/api v0.199.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/api v0.204.0 + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.34.2 + google.golang.org/protobuf v1.35.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.1 @@ -93,8 +93,8 @@ require ( ) require ( - cloud.google.com/go/auth v0.9.5 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect @@ -192,8 +192,8 @@ require ( golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/term v0.25.0 // indirect - golang.org/x/time v0.6.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 1dce748ba7..cfe262e1e7 100644 --- a/go.sum +++ b/go.sum @@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= 
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= -cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -818,8 +818,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -884,8 +884,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= -google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -920,10 +920,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -950,8 +950,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 58fd82832906610795356dcec4749017f48f21a6 Mon Sep 
17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:26:57 +0000 Subject: [PATCH 0851/1397] chore(deps): bump github.com/edsrzf/mmap-go from 1.1.0 to 1.2.0 Bumps [github.com/edsrzf/mmap-go](https://github.com/edsrzf/mmap-go) from 1.1.0 to 1.2.0. - [Release notes](https://github.com/edsrzf/mmap-go/releases) - [Commits](https://github.com/edsrzf/mmap-go/compare/v1.1.0...v1.2.0) --- updated-dependencies: - dependency-name: github.com/edsrzf/mmap-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3399ffb002..cd3fce6791 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.126.0 github.com/docker/docker v27.3.1+incompatible - github.com/edsrzf/mmap-go v1.1.0 + github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane v0.13.0 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb diff --git a/go.sum b/go.sum index 1dce748ba7..2387de649c 100644 --- a/go.sum +++ b/go.sum @@ -133,8 +133,8 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= -github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= +github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= 
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= From a334e0781cc3e28283bc9cb75a658ccf1e3b75d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:27:24 +0000 Subject: [PATCH 0852/1397] chore(deps): bump github.com/hetznercloud/hcloud-go/v2 Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.13.1 to 2.15.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.13.1...v2.15.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3399ffb002..1a14074863 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.29.4 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 - github.com/hetznercloud/hcloud-go/v2 v2.13.1 + github.com/hetznercloud/hcloud-go/v2 v2.15.0 github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.10 diff --git a/go.sum b/go.sum index 1dce748ba7..39e9b2a6e9 100644 --- a/go.sum +++ b/go.sum @@ -353,8 +353,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= -github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= +github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4= +github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= From 8a7942684cd0b85b75ded92130b2bf1f2679700b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:27:30 +0000 Subject: [PATCH 0853/1397] chore(deps): bump 
github.com/linode/linodego from 1.41.0 to 1.42.0 Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.41.0 to 1.42.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.41.0...v1.42.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3399ffb002..b31965059d 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.10 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.41.0 + github.com/linode/linodego v1.42.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f diff --git a/go.sum b/go.sum index 1dce748ba7..fee2b08c4e 100644 --- a/go.sum +++ b/go.sum @@ -399,8 +399,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= -github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= +github.com/linode/linodego v1.42.0 h1:ZSbi4MtvwrfB9Y6bknesorvvueBGGilcmh2D5dq76RM= +github.com/linode/linodego v1.42.0/go.mod h1:2yzmY6pegPBDgx2HDllmt0eIk2IlzqcgK6NR0wFCFRY= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= From a30b8e2b07635bb666dac054ee361c7fcca3db23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:27:48 +0000 Subject: [PATCH 0854/1397] chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.7.0 to 1.8.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.7.0...sdk/azcore/v1.8.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3399ffb002..a8c146ce36 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.23.0 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 github.com/Code-Hex/go-generics-cache v1.5.1 diff --git a/go.sum b/go.sum index 1dce748ba7..9502350c36 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= @@ -52,6 +54,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1. 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -121,6 +125,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= @@ -381,6 +387,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= @@ -528,6 +536,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= From deac15142f69a811013e2a373bb6e840267ac553 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:39:39 +0000 Subject: [PATCH 0855/1397] chore(deps): bump golangci/golangci-lint-action in /scripts Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.0 to 6.1.1. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/aaa42aa0628b4ae2578232a66b541047968fac86...971e284b6050e8a5849b72094c50ab08da042db8) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 1c099932ba..41378202e7 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -33,7 +33,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: args: --verbose version: v1.60.2 From 30830a77f4b8babbd0481f12b998fbe07deb109c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:39:48 +0000 Subject: [PATCH 0856/1397] chore(deps): bump actions/setup-go from 5.0.2 to 5.1.0 in /scripts Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.1.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32...41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 1c099932ba..a8b5403b18 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout repository uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Install Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies From 716c5164e45eca472711d4c8932fe934a68303c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:47:40 +0000 Subject: [PATCH 0857/1397] chore(deps): bump github.com/prometheus/client_golang Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.20.4 to 1.20.5. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.20.4...v1.20.5) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 0aad437588..c740be399f 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -7,7 +7,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 - github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 936b448d84..301761b61c 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= From adcc873d5119c1d1046869c8d9746768689c491d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 2 Nov 2024 11:24:44 +0000 Subject: [PATCH 0858/1397] [BUILD] React-app: replace 0.55.0-rc.0 with 0.55.0 Try to stop build errors. Signed-off-by: Bryan Boreham --- web/ui/react-app/package-lock.json | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index f8d1cfb3ea..d3de03e5e0 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -4341,12 +4341,11 @@ } }, "node_modules/@prometheus-io/codemirror-promql": { - "version": "0.55.0-rc.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.55.0-rc.0.tgz", - "integrity": "sha512-BlDKH2eB8Sd9bQmQjvJvncvZ+VTtrtReSO6qWZXULyrXp+FEjONybOH3Ejq/0a2hat0GpZzcEfwKqPbdy4WdCQ==", - "license": "Apache-2.0", + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.55.0.tgz", + "integrity": "sha512-W+aBBToIvxHbcDsQYJSpgaMtcLUCy3SMIK6jluaEgJrkpOfEJnItZu/rvCC/ehCz2c+h+6WkPJklH8WogsXyEg==", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0-rc.0", + "@prometheus-io/lezer-promql": "0.55.0", "lru-cache": "^7.18.3" }, "engines": { @@ -4362,10 +4361,9 @@ } }, "node_modules/@prometheus-io/lezer-promql": { - "version": "0.55.0-rc.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.55.0-rc.0.tgz", - "integrity": "sha512-Ikaabw8gfu0HI2D2rKykLBWio+ytTEE03bdZDMpILYULoeGVPdKgbeGLLI9Kafyv48Qiis55o60EfDoywiRHqA==", - "license": "Apache-2.0", + "version": "0.55.0", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.55.0.tgz", + "integrity": 
"sha512-DHg6l6pfDnE8eLsj4DyXhFDe7OsqSBw2EnSVG4biddzLsIF5gXKazIswYTGHJ26CGHHiDPcbXjhlm9dEWI2aJA==", "peerDependencies": { "@lezer/highlight": "^1.1.2", "@lezer/lr": "^1.2.3" From 034d2b24bcae90fce3ac337b4ddd399bd2ff4bc4 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Sat, 2 Nov 2024 11:38:51 -0300 Subject: [PATCH 0859/1397] Fix typos in tests (#15312) Signed-off-by: Arthur Silva Sens --- .../remote/otlptranslator/prometheus/normalize_label_test.go | 2 +- storage/remote/otlptranslator/prometheus/normalize_name_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go index 3ceb8760c5..21d4d6a6d8 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label_test.go @@ -29,7 +29,7 @@ func TestNormalizeLabel(t *testing.T) { {"label:with:colons", "label_with_colons"}, // Without UTF-8 support, colons are only allowed in metric names {"LabelWithCapitalLetters", "LabelWithCapitalLetters"}, {"label!with&special$chars)", "label_with_special_chars_"}, - {"label_with_foreign_characteres_字符", "label_with_foreign_characteres___"}, + {"label_with_foreign_characters_字符", "label_with_foreign_characters___"}, {"label.with.dots", "label_with_dots"}, {"123label", "key_123label"}, {"_label_starting_with_underscore", "key_label_starting_with_underscore"}, diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 4e55209410..8c5dc7d1f9 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -202,5 +202,5 @@ func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) require.Equal(t, 
"foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) - require.Equal(t, "metric_with___foreign_characteres", BuildCompliantName(createCounter("metric_with_字符_foreign_characteres", ""), "", false)) + require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) } From 1a22b1d84671750d5e61d5d14631342cfa8aec0a Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Sun, 3 Nov 2024 09:15:49 -0300 Subject: [PATCH 0860/1397] bugfix: Fix otlp translator switching colons to undescores (#15251) Signed-off-by: Arthur Silva Sens --- storage/remote/otlptranslator/prometheus/normalize_name.go | 2 +- .../remote/otlptranslator/prometheus/normalize_name_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 36b647f510..94fb4465fe 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -114,7 +114,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Split metric name into "tokens" (remove all non-alphanumerics) nameTokens := strings.FieldsFunc( metric.Name(), - func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }, + func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' }, ) // Split unit at the '/' if any diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 8c5dc7d1f9..3ea4b70a53 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -184,8 +184,8 @@ func TestBuildCompliantNameWithNormalize(t 
*testing.T) { require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) - require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) - require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) + require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) // Gauges with unit 1 are considered ratios. require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) // Slashes in units are converted. From 2fbbfc3da800d3b33c7f7b430e403f66b781b962 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 3 Nov 2024 11:56:44 +0000 Subject: [PATCH 0861/1397] Revert "Fix `MemPostings.Add` and `MemPostings.Get` data race (#15141)" This reverts commit 50ef0dc954592666a13ff92ef20811f0127c3b49. Memory allocation goes so high in Prombench that the system is unusable. 
Signed-off-by: Bryan Boreham --- tsdb/head_bench_test.go | 8 +++--- tsdb/index/postings.go | 27 ++++++------------ tsdb/index/postings_test.go | 55 ------------------------------------- 3 files changed, 12 insertions(+), 78 deletions(-) diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index aa2cf22148..dc682602b1 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -43,7 +43,7 @@ func BenchmarkHeadStripeSeriesCreate(b *testing.B) { defer h.Close() for i := 0; i < b.N; i++ { - h.getOrCreate(uint64(i), labels.FromStrings(labels.MetricName, "test", "a", strconv.Itoa(i), "b", strconv.Itoa(i%10), "c", strconv.Itoa(i%100), "d", strconv.Itoa(i/2), "e", strconv.Itoa(i/4))) + h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i))) } } @@ -61,8 +61,8 @@ func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - i := int(count.Inc()) - h.getOrCreate(uint64(i), labels.FromStrings(labels.MetricName, "test", "a", strconv.Itoa(i), "b", strconv.Itoa(i%10), "c", strconv.Itoa(i%100), "d", strconv.Itoa(i/2), "e", strconv.Itoa(i/4))) + i := count.Inc() + h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(int(i)))) } }) } @@ -82,7 +82,7 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) { defer h.Close() for i := 0; i < b.N; i++ { - h.getOrCreate(uint64(i), labels.FromStrings(labels.MetricName, "test", "a", strconv.Itoa(i), "b", strconv.Itoa(i%10), "c", strconv.Itoa(i%100), "d", strconv.Itoa(i/2), "e", strconv.Itoa(i/4))) + h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i))) } } diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 58f3473dae..5ed41f7698 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -345,14 +345,13 @@ func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) { p.mtx.Unlock() } -func appendWithExponentialGrowth[T any](a []T, v T) (_ []T, copied bool) { +func 
appendWithExponentialGrowth[T any](a []T, v T) []T { if cap(a) < len(a)+1 { newList := make([]T, len(a), len(a)*2+1) copy(newList, a) a = newList - copied = true } - return append(a, v), copied + return append(a, v) } func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { @@ -361,26 +360,16 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { nm = map[string][]storage.SeriesRef{} p.m[l.Name] = nm } - list, copied := appendWithExponentialGrowth(nm[l.Value], id) + list := appendWithExponentialGrowth(nm[l.Value], id) nm[l.Value] = list - // Return if it shouldn't be ordered, if it only has one element or if it's already ordered. - // The invariant is that the first n-1 items in the list are already sorted. - if !p.ordered || len(list) == 1 || list[len(list)-1] >= list[len(list)-2] { + if !p.ordered { return } - - if !copied { - // We have appended to the existing slice, - // and readers may already have a copy of this postings slice, - // so we need to copy it before sorting. - old := list - list = make([]storage.SeriesRef, len(old), cap(old)) - copy(list, old) - nm[l.Value] = list - } - - // Repair order violations. + // There is no guarantee that no higher ID was inserted before as they may + // be generated independently before adding them to postings. + // We repair order violations on insert. The invariant is that the first n-1 + // items in the list are already sorted. for i := len(list) - 1; i >= 1; i-- { if list[i] >= list[i-1] { break diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 7d0b717bf0..96c9ed124b 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1475,58 +1475,3 @@ func TestMemPostings_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { require.Error(t, p.Err()) require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. 
} - -func TestMemPostings_Unordered_Add_Get(t *testing.T) { - mp := NewMemPostings() - for ref := storage.SeriesRef(1); ref < 8; ref += 2 { - // First, add next series. - next := ref + 1 - mp.Add(next, labels.FromStrings(labels.MetricName, "test", "series", strconv.Itoa(int(next)))) - nextPostings := mp.Get(labels.MetricName, "test") - - // Now add current ref. - mp.Add(ref, labels.FromStrings(labels.MetricName, "test", "series", strconv.Itoa(int(ref)))) - - // Next postings should still reference the next series. - nextExpanded, err := ExpandPostings(nextPostings) - require.NoError(t, err) - require.Len(t, nextExpanded, int(ref)) - require.Equal(t, next, nextExpanded[len(nextExpanded)-1]) - } -} - -func TestMemPostings_Concurrent_Add_Get(t *testing.T) { - refs := make(chan storage.SeriesRef) - wg := sync.WaitGroup{} - wg.Add(1) - t.Cleanup(wg.Wait) - t.Cleanup(func() { close(refs) }) - - mp := NewMemPostings() - go func() { - defer wg.Done() - for ref := range refs { - mp.Add(ref, labels.FromStrings(labels.MetricName, "test", "series", strconv.Itoa(int(ref)))) - p := mp.Get(labels.MetricName, "test") - - _, err := ExpandPostings(p) - if err != nil { - t.Errorf("unexpected error: %s", err) - } - } - }() - - for ref := storage.SeriesRef(1); ref < 8; ref += 2 { - // Add next ref in another goroutine so they would race. - refs <- ref + 1 - // Add current ref here - mp.Add(ref, labels.FromStrings(labels.MetricName, "test", "series", strconv.Itoa(int(ref)))) - - // We don't read the value of the postings here, - // this is tested in TestMemPostings_Unordered_Add_Get where it's easier to achieve the determinism. - // This test just checks that there's no data race. 
- p := mp.Get(labels.MetricName, "test") - _, err := ExpandPostings(p) - require.NoError(t, err) - } -} From 7c4f8778815f56f7beb28fa2a15be0c33171c860 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sun, 3 Nov 2024 13:56:54 +0100 Subject: [PATCH 0862/1397] otlptranslator: Harmonize non-UTF8 sanitization w/ naming rules. (#15314) * otlptranslator: Harmonize non-UTF8 sanitization w/ naming rules. Harmonize non-UTF8 sanitization w/ Prometheus naming rules. --------- Signed-off-by: Arve Knudsen --- CHANGELOG.md | 1 + .../prometheus/normalize_label.go | 14 +--- .../prometheus/normalize_name.go | 83 +++++++++++-------- .../prometheus/normalize_name_test.go | 17 ++-- .../prometheusremotewrite/helper_test.go | 3 +- 5 files changed, 63 insertions(+), 55 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de97354f27..cdfed5ba52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ * [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 * [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 * [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 +* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251 ## 3.0.0-beta.1 / 2024-10-09 diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index c22c761320..d5de2c7651 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -19,6 +19,8 @@ package prometheus import ( "strings" "unicode" + + "github.com/prometheus/prometheus/util/strutil" ) // Normalizes the specified label to follow Prometheus label names standard. @@ -26,7 +28,6 @@ import ( // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. 
// // Labels that start with non-letter rune will be prefixed with "key_". -// // An exception is made for double-underscores which are allowed. func NormalizeLabel(label string) string { // Trivial case @@ -34,8 +35,7 @@ func NormalizeLabel(label string) string { return label } - // Replace all non-alphanumeric runes with underscores - label = strings.Map(sanitizeRune, label) + label = strutil.SanitizeLabelName(label) // If label starts with a number, prepend with "key_" if unicode.IsDigit(rune(label[0])) { @@ -46,11 +46,3 @@ func NormalizeLabel(label string) string { return label } - -// Return '_' for anything non-alphanumeric. -func sanitizeRune(r rune) rune { - if unicode.IsLower(r) || unicode.IsUpper(r) || unicode.IsDigit(r) { - return r - } - return '_' -} diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 94fb4465fe..0119b64dff 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -17,9 +17,12 @@ package prometheus import ( + "regexp" + "slices" "strings" "unicode" + "github.com/prometheus/prometheus/util/strutil" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -84,24 +87,27 @@ var perUnitMap = map[string]string{ // // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming -// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. +// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { - var metricName string - // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { return normalizeName(metric, namespace) } - // Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars - metricName = RemovePromForbiddenRunes(metric.Name()) + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) + + // Simple case (no full normalization, no units, etc.). + metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit? Prefix it with an underscore + // Metric name starts with a digit? Prefix it with an underscore. if metricName != "" && unicode.IsDigit(rune(metricName[0])) { metricName = "_" + metricName } @@ -109,12 +115,17 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix return metricName } -// Build a normalized name for the specified metric +// Build a normalized name for the specified metric. func normalizeName(metric pmetric.Metric, namespace string) string { - // Split metric name into "tokens" (remove all non-alphanumerics) + // Regexp for characters that can't be in a metric name token. + nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) + + // Split metric name into "tokens" (of supported metric name runes). + // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
nameTokens := strings.FieldsFunc( metric.Name(), - func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' }, + func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }, ) // Split unit at the '/' if any @@ -123,11 +134,12 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Main unit // Append if not blank, doesn't contain '{}', and is not present in metric name already if len(unitTokens) > 0 { + var mainUnitProm, perUnitProm string mainUnitOTel := strings.TrimSpace(unitTokens[0]) if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel)) - if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) { - nameTokens = append(nameTokens, mainUnitProm) + mainUnitProm = cleanUpUnit(unitMapGetOrDefault(mainUnitOTel)) + if slices.Contains(nameTokens, mainUnitProm) { + mainUnitProm = "" } } @@ -136,13 +148,26 @@ func normalizeName(metric pmetric.Metric, namespace string) string { if len(unitTokens) > 1 && unitTokens[1] != "" { perUnitOTel := strings.TrimSpace(unitTokens[1]) if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel)) - if perUnitProm != "" && !contains(nameTokens, perUnitProm) { - nameTokens = append(nameTokens, "per", perUnitProm) + perUnitProm = cleanUpUnit(perUnitMapGetOrDefault(perUnitOTel)) + } + if perUnitProm != "" { + perUnitProm = "per_" + perUnitProm + if slices.Contains(nameTokens, perUnitProm) { + perUnitProm = "" } } } + if perUnitProm != "" { + mainUnitProm = strings.TrimSuffix(mainUnitProm, "_") + } + + if mainUnitProm != "" { + nameTokens = append(nameTokens, mainUnitProm) + } + if perUnitProm != "" { + nameTokens = append(nameTokens, perUnitProm) + } } // Append _total for Counters @@ -235,15 +260,15 @@ func removeSuffix(tokens []string, suffix string) []string { return tokens } -// Clean up specified string so it's Prometheus 
compliant -func CleanUpString(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) }), "_") -} - -func RemovePromForbiddenRunes(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { - return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) && r != '_' && r != ':' - }), "_") +// cleanUpUnit cleans up unit so it matches model.LabelNameRE. +func cleanUpUnit(unit string) string { + // Multiple consecutive underscores are replaced with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. + multipleUnderscoresRE := regexp.MustCompile(`__+`) + return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( + strutil.SanitizeLabelName(unit), + "_", + ), "_") } // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit @@ -264,16 +289,6 @@ func perUnitMapGetOrDefault(perUnit string) string { return perUnit } -// Returns whether the slice contains the specified value -func contains(slice []string, value string) bool { - for _, sliceEntry := range slice { - if sliceEntry == value { - return true - } - } - return false -} - // Remove the specified value from the slice func removeItem(slice []string, value string) []string { newSlice := make([]string, 0, len(slice)) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 3ea4b70a53..2d5648e84c 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -148,13 +148,13 @@ func TestNamespace(t *testing.T) { require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) 
} -func TestCleanUpString(t *testing.T) { - require.Equal(t, "", CleanUpString("")) - require.Equal(t, "a_b", CleanUpString("a b")) - require.Equal(t, "hello_world", CleanUpString("hello, world!")) - require.Equal(t, "hello_you_2", CleanUpString("hello you 2")) - require.Equal(t, "1000", CleanUpString("$1000")) - require.Equal(t, "", CleanUpString("*+$^=)")) +func TestCleanUpUnit(t *testing.T) { + require.Equal(t, "", cleanUpUnit("")) + require.Equal(t, "a_b", cleanUpUnit("a b")) + require.Equal(t, "hello_world", cleanUpUnit("hello, world")) + require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2")) + require.Equal(t, "1000", cleanUpUnit("$1000")) + require.Equal(t, "", cleanUpUnit("*+$^=)")) } func TestUnitMapGetOrDefault(t *testing.T) { @@ -179,7 +179,7 @@ func TestRemoveItem(t *testing.T) { require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a")) } -func TestBuildCompliantNameWithNormalize(t *testing.T) { +func TestBuildCompliantNameWithSuffixes(t *testing.T) { require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true)) require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) @@ -190,6 +190,7 @@ func TestBuildCompliantNameWithNormalize(t *testing.T) { require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) // Slashes in units are converted. 
require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true)) + require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) } func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index 9a994c5a42..b22282097d 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -48,7 +48,6 @@ func TestCreateAttributes(t *testing.T) { resource.Attributes().PutStr(k, v) } attrs := pcommon.NewMap() - attrs.PutStr("__name__", "test_metric") attrs.PutStr("metric-attr", "metric value") testCases := []struct { @@ -162,7 +161,7 @@ func TestCreateAttributes(t *testing.T) { settings := Settings{ PromoteResourceAttributes: tc.promoteResourceAttributes, } - lbls := createAttributes(resource, attrs, settings, nil, false) + lbls := createAttributes(resource, attrs, settings, nil, false, model.MetricNameLabel, "test_metric") assert.ElementsMatch(t, lbls, tc.expectedLabels) }) From 4b56af7eb824d8e7dc13994b662b5c5b39928629 Mon Sep 17 00:00:00 2001 From: Alban Hurtaud Date: Mon, 4 Nov 2024 08:26:26 +0100 Subject: [PATCH 0863/1397] Add hidden flag for the delayed compaction random time window (#14919) * Add hidden flag for the delayed compaction random time window Signed-off-by: Alban HURTAUD * Update cmd/prometheus/main.go Co-authored-by: Ayoub Mrini Signed-off-by: Alban Hurtaud * Update cmd/prometheus/main.go Co-authored-by: Ayoub Mrini Signed-off-by: Alban Hurtaud * Update tsdb/db.go Co-authored-by: Ayoub Mrini Signed-off-by: Alban Hurtaud * Fix flag name according to review - add test for delay Signed-off-by: Alban HURTAUD * Fix afer main rebase Signed-off-by: Alban HURTAUD * Implement review comments 
Signed-off-by: Alban HURTAUD * Update generatedelaytest to try with limit values Signed-off-by: Alban HURTAUD --------- Signed-off-by: Alban HURTAUD Signed-off-by: Alban Hurtaud Co-authored-by: Ayoub Mrini --- cmd/prometheus/main.go | 11 +++++++++++ tsdb/db.go | 9 +++++++-- tsdb/db_test.go | 45 +++++++++++++++++++++++++++++++----------- 3 files changed, 51 insertions(+), 14 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 045389770e..8fb6d4d38e 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -433,6 +433,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk."). Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk) + serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled."). + Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). Default("data-agent/").StringVar(&cfg.agentStoragePath) @@ -663,6 +666,12 @@ func main() { cfg.tsdb.MaxBlockDuration = maxBlockDuration } + + // Delayed compaction checks + if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) { + logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. 
Using default", "default", tsdb.DefaultCompactionDelayMaxPercent) + cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent + } } noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{} @@ -1796,6 +1805,7 @@ type tsdbOptions struct { EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool EnableDelayedCompaction bool + CompactionDelayMaxPercent int EnableOverlappingCompaction bool EnableOOONativeHistograms bool } @@ -1820,6 +1830,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { EnableOOONativeHistograms: opts.EnableOOONativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, EnableDelayedCompaction: opts.EnableDelayedCompaction, + CompactionDelayMaxPercent: opts.CompactionDelayMaxPercent, EnableOverlappingCompaction: opts.EnableOverlappingCompaction, } } diff --git a/tsdb/db.go b/tsdb/db.go index 997bad36cb..bb9fe6ad7e 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -52,6 +52,9 @@ const ( // DefaultBlockDuration in milliseconds. DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond) + // DefaultCompactionDelayMaxPercent in percentage. + DefaultCompactionDelayMaxPercent = 10 + // Block dir suffixes to make deletion and creation operations atomic. // We decided to do suffixes instead of creating meta.json as last (or delete as first) one, // because in error case you still can recover meta.json from the block content within local TSDB dir. @@ -86,6 +89,7 @@ func DefaultOptions() *Options { EnableOverlappingCompaction: true, EnableSharding: false, EnableDelayedCompaction: false, + CompactionDelayMaxPercent: DefaultCompactionDelayMaxPercent, CompactionDelay: time.Duration(0), } } @@ -204,6 +208,8 @@ type Options struct { // CompactionDelay delays the start time of auto compactions. // It can be increased by up to one minute if the DB does not commit too often. CompactionDelay time.Duration + // CompactionDelayMaxPercent is the upper limit for CompactionDelay, specified as a percentage of the head chunk range. 
+ CompactionDelayMaxPercent int // NewCompactorFunc is a function that returns a TSDB compactor. NewCompactorFunc NewCompactorFunc @@ -1986,8 +1992,7 @@ func (db *DB) EnableCompactions() { } func (db *DB) generateCompactionDelay() time.Duration { - // Up to 10% of the head's chunkRange. - return time.Duration(rand.Int63n(db.head.chunkRange.Load()/10)) * time.Millisecond + return time.Duration(rand.Int63n(db.head.chunkRange.Load()*int64(db.opts.CompactionDelayMaxPercent)/100)) * time.Millisecond } // ForceHeadMMap is intended for use only in tests and benchmarks. diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 3f0fc0c841..50f50a3a25 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -8896,24 +8896,45 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) { } func TestGenerateCompactionDelay(t *testing.T) { - assertDelay := func(delay time.Duration) { + assertDelay := func(delay time.Duration, expectedMaxPercentDelay int) { t.Helper() require.GreaterOrEqual(t, delay, time.Duration(0)) - // Less than 10% of the chunkRange. - require.LessOrEqual(t, delay, 6000*time.Millisecond) + // Expect to generate a delay up to MaxPercentDelay of the head chunk range + require.LessOrEqual(t, delay, (time.Duration(60000*expectedMaxPercentDelay/100) * time.Millisecond)) } opts := DefaultOptions() - opts.EnableDelayedCompaction = true - db := openTestDB(t, opts, []int64{60000}) - defer func() { - require.NoError(t, db.Close()) - }() - // The offset is generated and changed while opening. 
- assertDelay(db.opts.CompactionDelay) + cases := []struct { + compactionDelayPercent int + }{ + { + compactionDelayPercent: 1, + }, + { + compactionDelayPercent: 10, + }, + { + compactionDelayPercent: 60, + }, + { + compactionDelayPercent: 100, + }, + } - for i := 0; i < 1000; i++ { - assertDelay(db.generateCompactionDelay()) + opts.EnableDelayedCompaction = true + + for _, c := range cases { + opts.CompactionDelayMaxPercent = c.compactionDelayPercent + db := openTestDB(t, opts, []int64{60000}) + defer func() { + require.NoError(t, db.Close()) + }() + // The offset is generated and changed while opening. + assertDelay(db.opts.CompactionDelay, c.compactionDelayPercent) + + for i := 0; i < 1000; i++ { + assertDelay(db.generateCompactionDelay(), c.compactionDelayPercent) + } } } From 01a029c24c743c88855384ba10d8b78e40b1da02 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 4 Nov 2024 10:10:55 +0000 Subject: [PATCH 0864/1397] CI Build: re-enable race detector Signed-off-by: Bryan Boreham --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2714211dd7..d569af0f35 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ jobs: - uses: ./.github/promci/actions/setup_environment with: enable_npm: true - - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags="" + - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false - run: make -C documentation/examples/remote_storage - run: make -C documentation/examples From cfeaa2c1f15987019152a68599558d29a12eb34b Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 4 Nov 2024 12:47:16 +0000 Subject: [PATCH 0865/1397] Create release 2.55.1 With one bugfix. 
Signed-off-by: Bryan Boreham --- CHANGELOG.md | 4 ++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 18 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd5d4bd211..19af3f460f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## unreleased +## 2.55.1 / 2024-11-04 + +* [BUGFIX] `round()` function did not remove `__name__` label. #15250 + ## 2.55.0 / 2024-10-22 * [FEATURE] PromQL: Add experimental `info` function. #14495 diff --git a/VERSION b/VERSION index c2576f1624..0a756ea0a7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.55.0 +2.55.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 888a4c5e5c..9abd965bf4 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.55.0", + "version": "0.55.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0", + "@prometheus-io/lezer-promql": "0.55.1", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index b234426dd6..4fc27cef8c 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.55.0", + "version": "0.55.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json 
b/web/ui/package-lock.json index c62896bc3f..4dc9b97180 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.55.0", + "version": "0.55.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.55.0", + "version": "0.55.1", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.55.0", + "version": "0.55.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0", + "@prometheus-io/lezer-promql": "0.55.1", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.55.0", + "version": "0.55.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", @@ -19352,7 +19352,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.55.0", + "version": "0.55.1", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", @@ -19370,7 +19370,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.55.0", + "@prometheus-io/codemirror-promql": "0.55.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", diff --git a/web/ui/package.json b/web/ui/package.json index 135b793ba2..35f6503b46 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, - "version": "0.55.0" + "version": "0.55.1" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 79cb86b31f..3528e8f259 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.55.0", + "version": "0.55.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", 
@@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.55.0", + "@prometheus-io/codemirror-promql": "0.55.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.6", From 1ac3ca952858f329b280a0bf549ec6bc24726ee0 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Thu, 31 Oct 2024 20:53:34 +0100 Subject: [PATCH 0866/1397] Release 3.0.0-rc.0 Signed-off-by: Jan Fajerski --- CHANGELOG.md | 2 ++ VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package-lock.json | 4 ++-- web/ui/react-app/package.json | 2 +- 9 files changed, 19 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cdfed5ba52..88d24adeab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## unreleased +## 3.0.0-rc.0 / 2024-10-31 + * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136 * [CHANGE] Remote-write: default enable_http2 to false. #15219 * [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. 
#15164 diff --git a/VERSION b/VERSION index 1941d52827..3c6eac305d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0-beta.1 +3.0.0-rc.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 4e886e482f..679685a50f 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0-beta.1", + "@prometheus-io/codemirror-promql": "0.300.0-rc.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index bcc5464795..feb75d73fe 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-beta.1", + "@prometheus-io/lezer-promql": "0.300.0-rc.0", "lru-cache": "^11.0.1" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 0883552c84..49e54c40af 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "description": "lezer-based PromQL 
grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 10b14a8bc7..242179a083 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0-beta.1", + "@prometheus-io/codemirror-promql": "0.300.0-rc.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", @@ -155,10 +155,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-beta.1", + "@prometheus-io/lezer-promql": "0.300.0-rc.0", "lru-cache": "^11.0.1" }, "devDependencies": { @@ -188,7 +188,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", diff --git a/web/ui/package.json b/web/ui/package.json index 6d6e280108..ab23c6363b 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.300.0-beta.1", + "version": "0.300.0-rc.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", 
diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index d3de03e5e0..a4c5af6ce9 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.55.0", + "version": "0.300.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.55.0", + "version": "0.300.0-rc.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d75aba5c4a..9711a09e83 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.55.0", + "version": "0.300.0-rc.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", From 88818c9cb313a669ba3202fe42c2f030c4f2e52f Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Mon, 4 Nov 2024 12:05:29 -0300 Subject: [PATCH 0867/1397] chore: Remove dead code Signed-off-by: Arthur Silva Sens --- cmd/prometheus/main.go | 10 ---------- scrape/manager.go | 2 -- 2 files changed, 12 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8fb6d4d38e..ecf179ce59 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -190,7 +190,6 @@ type flagConfig struct { queryConcurrency int queryMaxSamples int RemoteFlushDeadline model.Duration - nameEscapingScheme string maxNotificationsSubscribers int enableAutoReload bool @@ -551,15 +550,6 @@ func main() { os.Exit(1) } - if cfg.nameEscapingScheme != "" { - scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme) - if err != nil { - fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme) - os.Exit(1) - } - model.NameEscapingScheme = scheme - } - if agentMode && len(serverOnlyFlags) > 0 { 
fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags) os.Exit(3) diff --git a/scrape/manager.go b/scrape/manager.go index f3dad2a048..04da3162e6 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -94,8 +94,6 @@ type Options struct { skipOffsetting bool } -const DefaultNameEscapingScheme = model.ValueEncodingEscaping - // Manager maintains a set of scrape pools and manages start/stop cycles // when receiving new target groups from the discovery manager. type Manager struct { From 16217a6fcce6e495f37d4d9ef920fe8cc36ea5d0 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 4 Nov 2024 20:01:03 +0000 Subject: [PATCH 0868/1397] Make release 2.53.3 LTS Signed-off-by: Bryan Boreham --- CHANGELOG.md | 4 ++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 18 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e68bda9f29..d6c951696d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 2.53.3 / 2024-11-04 + +* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. 
#14685, #14740 + ## 2.53.2 / 2024-08-09 Fix a bug where Prometheus would crash with a segmentation fault if a remote-read diff --git a/VERSION b/VERSION index f3a3607f9e..27b2061b61 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.53.2 +2.53.3 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index b77bf91e37..6b2d271102 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.2", + "version": "0.53.3", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.2", + "@prometheus-io/lezer-promql": "0.53.3", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 14721e1b51..bc39014a1c 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.53.2", + "version": "0.53.3", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c90e742342..85c30fc563 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.53.2", + "version": "0.53.3", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.53.2", + "version": "0.53.3", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.2", + "version": "0.53.3", "license": 
"Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.2", + "@prometheus-io/lezer-promql": "0.53.3", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.53.2", + "version": "0.53.3", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.5.1", @@ -19233,7 +19233,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.53.2", + "version": "0.53.3", "dependencies": { "@codemirror/autocomplete": "^6.11.1", "@codemirror/commands": "^6.3.2", @@ -19251,7 +19251,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.2", + "@prometheus-io/codemirror-promql": "0.53.3", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", diff --git a/web/ui/package.json b/web/ui/package.json index 7253c77864..5c1eabb11f 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.53.2" + "version": "0.53.3" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 9af87c9b8c..41382f3fe8 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.53.2", + "version": "0.53.3", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.2", + "@prometheus-io/codemirror-promql": "0.53.3", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", From 241062f68e076726bb61f2aef47a1c85c7230555 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 09:59:54 +0000 Subject: [PATCH 0869/1397] chore(deps): bump 
github.com/Azure/azure-sdk-for-go/sdk/azcore Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.14.0 to 1.16.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.14.0...sdk/azcore/v1.16.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index edb9c276e1..f2677a0f65 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.0 toolchain go1.23.0 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 diff --git a/go.sum b/go.sum index 7a57b5c854..dad73bc364 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= From c30cd80a670d41e2b618ab768212cbeef4c0e3dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:03:09 +0000 Subject: [PATCH 0870/1397] chore(deps): bump github.com/prometheus/common Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.60.0 to 0.60.1. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.60.0...v0.60.1) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index c740be399f..df13c47329 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.6 github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.60.0 + github.com/prometheus/common v0.60.1 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 301761b61c..ec1f4dcd66 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= -github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= From d14713e9526ca152812cc8d66a2734c7482c4716 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:21:20 +0000 Subject: [PATCH 0871/1397] chore(deps): bump github.com/klauspost/compress from 1.17.10 to 1.17.11 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.10 to 1.17.11. - [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.10...v1.17.11) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2b6af4ccc7..f7ccc4cbdf 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/hetznercloud/hcloud-go/v2 v2.15.0 github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.10 + github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.42.0 github.com/miekg/dns v1.1.62 diff --git a/go.sum b/go.sum index 2cd697bfe3..4469b04fdd 100644 --- a/go.sum +++ b/go.sum @@ -391,8 +391,8 @@ github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKu github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= -github.com/klauspost/compress v1.17.10/go.mod 
h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= From e84bfe94ca3746f9d4d75fd2e4b4c3b0e72a0ddd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:21:33 +0000 Subject: [PATCH 0872/1397] chore(deps): bump github.com/hashicorp/consul/api from 1.29.4 to 1.30.0 Bumps [github.com/hashicorp/consul/api](https://github.com/hashicorp/consul) from 1.29.4 to 1.30.0. - [Release notes](https://github.com/hashicorp/consul/releases) - [Changelog](https://github.com/hashicorp/consul/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/consul/compare/api/v1.29.4...api/v1.30.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/consul/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 2b6af4ccc7..a8b252e287 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/gophercloud/gophercloud v1.14.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.29.4 + github.com/hashicorp/consul/api v1.30.0 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 github.com/hetznercloud/hcloud-go/v2 v2.15.0 github.com/ionos-cloud/sdk-go/v6 v6.2.1 diff --git a/go.sum b/go.sum index 2cd697bfe3..540b96e4f7 100644 --- a/go.sum +++ b/go.sum @@ -306,10 +306,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= -github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= -github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= -github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= +github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= From 
2c49f1bb6f9056431e352a25e04980beea20b1e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:33:54 +0000 Subject: [PATCH 0873/1397] chore(deps): bump github.com/digitalocean/godo from 1.126.0 to 1.128.0 Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.126.0 to 1.128.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.126.0...v1.128.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3cfa2c1dfc..bf2cf29b5e 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.126.0 + github.com/digitalocean/godo v1.128.0 github.com/docker/docker v27.3.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane v0.13.0 diff --git a/go.sum b/go.sum index 58e3df221c..08c6f6ce85 100644 --- a/go.sum +++ b/go.sum @@ -127,8 +127,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= -github.com/digitalocean/godo v1.126.0/go.mod 
h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.128.0 h1:cGn/ibMSRZ9+8etbzMv2MnnCEPTTGlEnx3HHTPwdk1U= +github.com/digitalocean/godo v1.128.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= From 4f97b0937e81931f287676c3d17fbf0f1f4b3a8b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:36:50 +0000 Subject: [PATCH 0874/1397] chore(deps): bump actions/setup-node from 4.0.4 to 4.1.0 Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.0.4 to 4.1.0. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/0a44ba7841725637a19e28fa30b79a866c81b0a6...39370e3970a6d050c480ffad4ff0ed4d3fdee5af) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fcdff2cb5d..426f6ae231 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -243,7 +243,7 @@ jobs: uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - name: Install nodejs - uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4 + uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" From be41d3efeb3284449b0345fc825e18a43e83f803 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 10:45:13 +0000 Subject: [PATCH 0875/1397] chore(deps): bump github.com/influxdata/influxdb Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.6 to 1.11.7. - [Release notes](https://github.com/influxdata/influxdb/releases) - [Commits](https://github.com/influxdata/influxdb/compare/v1.11.6...v1.11.7) --- updated-dependencies: - dependency-name: github.com/influxdata/influxdb dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index df13c47329..b6a56f9ef1 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -6,7 +6,7 @@ require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.6 + github.com/influxdata/influxdb v1.11.7 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.60.1 github.com/prometheus/prometheus v0.53.1 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index ec1f4dcd66..7af1984c6f 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/influxdata/influxdb v1.11.6 h1:zS5MRY+RQ5/XFTer5R8xQRnY17JYSbacvO6OaP164wU= -github.com/influxdata/influxdb v1.11.6/go.mod h1:F10NoQb9qa04lME3pTPWQrYt4JZ/ke1Eei+1ttgHHrg= +github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw= +github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= From 541c7fd9fee6928a7d1b0f97f0edd98726e69d51 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 5 Nov 2024 11:03:40 +0000 Subject: [PATCH 0876/1397] [COMMENT] Remove duplicate line Signed-off-by: Bryan Boreham --- tsdb/ooo_head_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index b9badfea21..b1641e29ba 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -27,7 +27,6 @@ import ( const testMaxSize int = 32 -// Formulas chosen to make testing easy. // Formulas chosen to make testing easy. func valEven(pos int) int64 { return int64(pos*2 + 2) } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals. From 28f22b4d80aed4e7cafcc4f42c086521479b6def Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 5 Nov 2024 11:48:03 +0100 Subject: [PATCH 0877/1397] Update prometheus/common Signed-off-by: Julien --- go.mod | 2 +- go.sum | 4 ++-- util/logging/file_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 3cfa2c1dfc..9d18ebfbc8 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.60.0 + github.com/prometheus/common v0.60.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.13.0 diff --git a/go.sum b/go.sum index 58e3df221c..48d9efa2ca 100644 --- a/go.sum +++ b/go.sum @@ -521,8 +521,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= -github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= diff --git a/util/logging/file_test.go b/util/logging/file_test.go index 8ab4754339..00752df8db 100644 --- a/util/logging/file_test.go +++ b/util/logging/file_test.go @@ -40,7 +40,7 @@ func TestJSONFileLogger_basic(t *testing.T) { _, err = f.Read(r) require.NoError(t, err) - result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":\{.+\},"msg":"test","hello":"world"}\n`, r) + result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"file.go:\d+","msg":"test","hello":"world"}\n`, r) require.NoError(t, err) require.True(t, result, "unexpected content: %s", r) From 9d9e86b33360c57b30aec2d346ec36e45bcf4982 Mon Sep 17 00:00:00 2001 From: Julien Date: Mon, 30 Sep 2024 12:55:11 +0200 Subject: [PATCH 0878/1397] Fix auto reload when a config file with a syntax error is reverted When we had a syntax error but restored the old file, we did not re-trigger the config reload, so the config reload metric was showing that config reload was unsucessful. I made magic to handle logs in cmd/prometheus. For now it is a separate file so we can backport this easily. I will generalize the helper in another PR. 
Signed-off-by: Julien --- cmd/prometheus/main.go | 11 +- cmd/prometheus/reload_test.go | 229 ++++++++++++++++++++++++++++++++++ 2 files changed, 234 insertions(+), 6 deletions(-) create mode 100644 cmd/prometheus/reload_test.go diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8fb6d4d38e..af44331b04 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1153,9 +1153,8 @@ func main() { if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { logger.Error("Error reloading config", "err", err) } else if cfg.enableAutoReload { - if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { - checksum = currentChecksum - } else { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { logger.Error("Failed to generate checksum during configuration reload", "err", err) } } @@ -1166,9 +1165,8 @@ func main() { } else { rc <- nil if cfg.enableAutoReload { - if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { - checksum = currentChecksum - } else { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { logger.Error("Failed to generate checksum during configuration reload", "err", err) } } @@ -1179,6 +1177,7 @@ func main() { } currentChecksum, err := config.GenerateChecksum(cfg.configFile) if err != nil { + checksum = currentChecksum logger.Error("Failed to generate checksum during configuration reload", "err", err) } else if currentChecksum == checksum { continue diff --git a/cmd/prometheus/reload_test.go b/cmd/prometheus/reload_test.go new file mode 100644 index 0000000000..18a7ff2ad1 --- /dev/null +++ b/cmd/prometheus/reload_test.go @@ -0,0 +1,229 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" +) + +const configReloadMetric = "prometheus_config_last_reload_successful" + +func TestAutoReloadConfig_ValidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +`, + expectedInterval: "15s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +invalid_syntax +`, + expectedInterval: "30s", + expectedMetric: 0, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func runTestSteps(t *testing.T, steps []struct { + configText string + expectedInterval string + expectedMetric float64 +}, +) { + configDir := t.TempDir() + configFilePath 
:= filepath.Join(configDir, "prometheus.yml") + + t.Logf("Config file path: %s", configFilePath) + + require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file") + + port := testutil.RandomUnprivilegedPort(t) + runPrometheusWithLogging(t, configFilePath, port) + + baseURL := "http://localhost:" + strconv.Itoa(port) + require.Eventually(t, func() bool { + resp, err := http.Get(baseURL + "/-/ready") + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK + }, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time") + + for i, step := range steps { + t.Logf("Step %d", i) + require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step") + + require.Eventually(t, func() bool { + return verifyScrapeInterval(t, baseURL, step.expectedInterval) && + verifyConfigReloadMetric(t, baseURL, step.expectedMetric) + }, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time") + } +} + +func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool { + resp, err := http.Get(baseURL + "/api/v1/status/config") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + config := struct { + Data struct { + YAML string `json:"yaml"` + } `json:"data"` + }{} + + require.NoError(t, json.Unmarshal(body, &config)) + return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval) +} + +func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool { + resp, err := http.Get(baseURL + "/metrics") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + lines := string(body) + var actualValue float64 + found := false + + for _, line := range strings.Split(lines, "\n") { + if strings.HasPrefix(line, 
configReloadMetric) { + parts := strings.Fields(line) + if len(parts) >= 2 { + actualValue, err = strconv.ParseFloat(parts[1], 64) + require.NoError(t, err) + found = true + break + } + } + } + + return found && actualValue == expectedValue +} + +func captureLogsToTLog(t *testing.T, r io.Reader) { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + t.Log(scanner.Text()) + } + if err := scanner.Err(); err != nil { + t.Logf("Error reading logs: %v", err) + } +} + +func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) { + stdoutPipe, stdoutWriter := io.Pipe() + stderrPipe, stderrWriter := io.Pipe() + + var wg sync.WaitGroup + wg.Add(2) + + prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port)) + prom.Stdout = stdoutWriter + prom.Stderr = stderrWriter + + go func() { + defer wg.Done() + captureLogsToTLog(t, stdoutPipe) + }() + go func() { + defer wg.Done() + captureLogsToTLog(t, stderrPipe) + }() + + t.Cleanup(func() { + prom.Process.Kill() + prom.Wait() + stdoutWriter.Close() + stderrWriter.Close() + wg.Wait() + }) + + require.NoError(t, prom.Start()) +} From b1e4052682c710526681155f4067eafc86f0809d Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 5 Nov 2024 12:59:57 +0100 Subject: [PATCH 0879/1397] MemPostings.Delete(): make pauses to unlock and let the readers read (#15242) This introduces back some unlocking that was removed in #13286 but in a more balanced way, as suggested by @pracucci. For TSDBs with a lot of churn, Delete() can take a couple of seconds, and while it's holding the mutex, reads and writes are blocked waiting for that mutex, increasing the number of connections handled and memory usage. 
This implementation pauses every 512 labels processed (note that also compared to #13286 we're not processing all the label-values anymore, but only the affected ones, because of #14307), makes sure that it's possible to get the read lock, and waits for a few milliseconds more. Signed-off-by: Oleg Zaytsev Co-authored-by: Marco Pracucci --- tsdb/index/postings.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 5ed41f7698..d7b497e613 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -24,6 +24,7 @@ import ( "sort" "strings" "sync" + "time" "github.com/bboreham/go-loser" @@ -312,8 +313,30 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma } } + i := 0 for l := range affected { + i++ process(l) + + // From time to time we want some readers to go through and read their postings. + // It takes around 50ms to process a 1K series batch, and 120ms to process a 10K series batch (local benchmarks on an M3). + // Note that a read query will most likely want to read multiple postings lists, say 5, 10 or 20 (depending on the number of matchers) + // And that read query will most likely evaluate only one of those matchers before we unpause here, so we want to pause often. + if i%512 == 0 { + p.mtx.Unlock() + // While it's tempting to just do a `time.Sleep(time.Millisecond)` here, + // it wouldn't ensure that readers actually were able to get the read lock, + // because if there are writes waiting on the same mutex, readers won't be able to get it. + // So we just grab one RLock ourselves. + p.mtx.RLock() + // We shouldn't wait here, because we would be blocking a potential write for no reason. + // Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock. + p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section. 
+ // Now we can wait a little bit just to increase the chance of a reader getting the lock. + // If we were deleting 100M series here, pausing every 512 with 1ms sleeps would be an extra of 200s, which is negligible. + time.Sleep(time.Millisecond) + p.mtx.Lock() + } } process(allPostingsKey) } From 50e83e8f98c49b41ca00a13db2b63f8a17446d2a Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 5 Nov 2024 15:09:23 +0100 Subject: [PATCH 0880/1397] Export 'go_sync_mutex_wait_total_seconds_total' metric This metric can be useful to debug mutex contention. Ref: https://github.com/golang/go/issues/49881 Signed-off-by: Oleg Zaytsev --- cmd/prometheus/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8fb6d4d38e..fcbc419888 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -27,6 +27,7 @@ import ( "os" "os/signal" "path/filepath" + goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package. "runtime" "runtime/debug" "strconv" @@ -301,6 +302,7 @@ func main() { collectors.WithGoCollectorRuntimeMetrics( collectors.MetricsGC, collectors.MetricsScheduler, + collectors.GoRuntimeMetricsRule{Matcher: goregexp.MustCompile(`^/sync/mutex/wait/total:seconds$`)}, ), ), ) From 07ea8d1aeba5c1644d6f0be1d847f7faf0eb8e21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 14:59:00 +0000 Subject: [PATCH 0881/1397] chore(deps): bump github.com/envoyproxy/go-control-plane Bumps [github.com/envoyproxy/go-control-plane](https://github.com/envoyproxy/go-control-plane) from 0.13.0 to 0.13.1. 
- [Release notes](https://github.com/envoyproxy/go-control-plane/releases) - [Changelog](https://github.com/envoyproxy/go-control-plane/blob/main/CHANGELOG.md) - [Commits](https://github.com/envoyproxy/go-control-plane/compare/v0.13.0...v0.13.1) --- updated-dependencies: - dependency-name: github.com/envoyproxy/go-control-plane dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2dea61f088..4c66fa440a 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/digitalocean/godo v1.128.0 github.com/docker/docker v27.3.1+incompatible github.com/edsrzf/mmap-go v1.2.0 - github.com/envoyproxy/go-control-plane v0.13.0 + github.com/envoyproxy/go-control-plane v0.13.1 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.7.0 diff --git a/go.sum b/go.sum index b85778516e..7e867dc9eb 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,8 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= From b8d06c79ea038581986caae3d429e5b6a3f40a3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 14:59:52 +0000 Subject: [PATCH 0882/1397] chore(deps): bump the go-opentelemetry-io group with 6 updates Bumps the go-opentelemetry-io group with 6 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) | `1.16.0` | `1.18.0` | | [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector) | `0.110.0` | `0.112.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.30.0` | `1.31.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.30.0` | `1.31.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.30.0` | `1.31.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.30.0` | `1.31.0` | Updates `go.opentelemetry.io/collector/pdata` from 1.16.0 to 1.18.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.16.0...pdata/v1.18.0) Updates `go.opentelemetry.io/collector/semconv` from 0.110.0 to 0.112.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - 
[Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.110.0...v0.112.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...v1.31.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...v1.31.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...v1.31.0) Updates `go.opentelemetry.io/otel/sdk` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...v1.31.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace 
dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... Signed-off-by: dependabot[bot] --- go.mod | 12 ++++++------ go.sum | 28 ++++++++++++++-------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/go.mod b/go.mod index 2dea61f088..8319fe16f0 100644 --- a/go.mod +++ b/go.mod @@ -60,15 +60,15 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.16.0 - go.opentelemetry.io/collector/semconv v0.110.0 + go.opentelemetry.io/collector/pdata v1.18.0 + go.opentelemetry.io/collector/semconv v0.112.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 - go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 go.opentelemetry.io/otel/trace v1.31.0 
go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.6.0 diff --git a/go.sum b/go.sum index b85778516e..a0b22a14c2 100644 --- a/go.sum +++ b/go.sum @@ -538,8 +538,8 @@ github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0 github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= @@ -595,26 +595,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= -go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= -go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= -go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/collector/pdata v1.18.0 
h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg= +go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= +go.opentelemetry.io/collector/semconv v0.112.0 h1:JPQyvZhlNLVSuVI+FScONaiFygB7h7NTZceUEKIQUEc= +go.opentelemetry.io/collector/semconv v0.112.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= From 3294b7d58b868f57cf954626c811711fc40644e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:10:10 +0000 Subject: [PATCH 0883/1397] chore(deps): bump actions/checkout from 4.2.0 to 4.2.2 in /scripts Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.0 to 4.2.2. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/d632683dd7b4114ad314bca15554477dd762a938...11bd71901bbe5b1630ceea73d27597364c9af683) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 7183091ac3..7af9bba77b 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: From c1f8036228a1bcd8182208c1253c20bff6461ae2 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 5 Nov 2024 16:12:05 +0100 Subject: [PATCH 0884/1397] Fix selector / series formatting for empty metric names Fixes https://github.com/prometheus/prometheus/issues/15335 Signed-off-by: Julius Volz --- .../src/promql/serializeAndFormat.test.ts | 14 ++++++++++++++ web/ui/mantine-ui/src/promql/utils.ts | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts index a2b97ec904..da4be7ced1 100644 --- a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts +++ b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts @@ -157,6 +157,20 @@ describe("serializeNode and formatNode", () => { }, output: "metric_name[5m] @ start() offset -10m", }, + { + node: { + type: nodeType.vectorSelector, + name: "", // Test formatting a selector with an 
empty metric name. + matchers: [ + { type: matchType.equal, name: "label1", value: "value1" }, + ], + offset: 0, + timestamp: null, + startOrEnd: null, + }, + output: + '{label1="value1"}', + }, // Aggregations. { diff --git a/web/ui/mantine-ui/src/promql/utils.ts b/web/ui/mantine-ui/src/promql/utils.ts index 2f1cc11d2f..2addeed8ab 100644 --- a/web/ui/mantine-ui/src/promql/utils.ts +++ b/web/ui/mantine-ui/src/promql/utils.ts @@ -271,7 +271,7 @@ const metricNameRe = /^[a-zA-Z_:][a-zA-Z0-9_:]*$/; const labelNameCharsetRe = /^[a-zA-Z_][a-zA-Z0-9_]*$/; export const metricContainsExtendedCharset = (str: string) => { - return !metricNameRe.test(str); + return str !== "" && !metricNameRe.test(str); }; export const labelNameContainsExtendedCharset = (str: string) => { From d19e749dc6a76053c7f07709221344782818ea75 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 5 Nov 2024 16:12:05 +0100 Subject: [PATCH 0885/1397] Fix selector / series formatting for empty metric names Fixes https://github.com/prometheus/prometheus/issues/15335 Signed-off-by: Julius Volz --- .../src/promql/serializeAndFormat.test.ts | 14 ++++++++++++++ web/ui/mantine-ui/src/promql/utils.ts | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts index a2b97ec904..da4be7ced1 100644 --- a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts +++ b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts @@ -157,6 +157,20 @@ describe("serializeNode and formatNode", () => { }, output: "metric_name[5m] @ start() offset -10m", }, + { + node: { + type: nodeType.vectorSelector, + name: "", // Test formatting a selector with an empty metric name. + matchers: [ + { type: matchType.equal, name: "label1", value: "value1" }, + ], + offset: 0, + timestamp: null, + startOrEnd: null, + }, + output: + '{label1="value1"}', + }, // Aggregations. 
{ diff --git a/web/ui/mantine-ui/src/promql/utils.ts b/web/ui/mantine-ui/src/promql/utils.ts index 2f1cc11d2f..2addeed8ab 100644 --- a/web/ui/mantine-ui/src/promql/utils.ts +++ b/web/ui/mantine-ui/src/promql/utils.ts @@ -271,7 +271,7 @@ const metricNameRe = /^[a-zA-Z_:][a-zA-Z0-9_:]*$/; const labelNameCharsetRe = /^[a-zA-Z_][a-zA-Z0-9_]*$/; export const metricContainsExtendedCharset = (str: string) => { - return !metricNameRe.test(str); + return str !== "" && !metricNameRe.test(str); }; export const labelNameContainsExtendedCharset = (str: string) => { From d60cdcdac8b7f173d37b9e34049586e3e00f1435 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 10:29:01 +0000 Subject: [PATCH 0886/1397] chore(deps): bump actions/checkout from 4.2.0 to 4.2.2 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.0 to 4.2.2. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/d632683dd7b4114ad314bca15554477dd762a938...11bd71901bbe5b1630ceea73d27597364c9af683) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- .github/workflows/ci.yml | 26 ++++++++++----------- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/container_description.yml | 4 ++-- .github/workflows/repo_sync.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index bf7f681b69..463a725b7a 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -12,7 +12,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 669305ebd3..ce2014ecff 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 66a5f267d5..1fe67f850b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: # should also be updated. 
image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment with: @@ -29,7 +29,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... @@ -48,7 +48,7 @@ jobs: # The go version in this image should be N-1 wrt test_go. image: quay.io/prometheus/golang-builder:1.22-base steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - run: make build # Don't run NPM build; don't run race-detector. 
- run: make test GO_ONLY=1 test-flags="" @@ -62,7 +62,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment with: @@ -79,7 +79,7 @@ jobs: name: Go tests on Windows runs-on: windows-latest steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.23.x @@ -96,7 +96,7 @@ jobs: container: image: quay.io/prometheus/golang-builder:1.23-base steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - run: go install ./cmd/promtool/. - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest @@ -121,7 +121,7 @@ jobs: matrix: thread: [ 0, 1, 2 ] steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/build with: @@ -146,7 +146,7 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. 
steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/build with: @@ -169,7 +169,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -182,7 +182,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: @@ -208,7 +208,7 @@ jobs: needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all] if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/publish_main with: @@ -225,7 +225,7 @@ jobs: || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/publish_release with: @@ -240,7 +240,7 @@ jobs: needs: [test_ui, codeql] steps: - name: Checkout - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - name: Install nodejs uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 77fbd4dafb..9002f4c8ef 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml index 144859486d..dcca16ff34 100644 --- a/.github/workflows/container_description.yml +++ b/.github/workflows/container_description.yml @@ -18,7 +18,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub @@ -40,7 +40,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
steps: - name: git checkout - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index aa306c46d0..a659d431d0 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -13,7 +13,7 @@ jobs: container: image: quay.io/prometheus/golang-builder steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - run: ./scripts/sync_repo_files.sh env: GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c63727f7f1..440ac8a734 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -21,7 +21,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2 with: persist-credentials: false From 160a4801d2f36100cdeefaf5f901e48e8f944d9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 10:29:01 +0000 Subject: [PATCH 0887/1397] chore(deps): bump golangci/golangci-lint-action from 6.1.0 to 6.1.1 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.0 to 6.1.1. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/aaa42aa0628b4ae2578232a66b541047968fac86...971e284b6050e8a5849b72094c50ab08da042db8) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 66a5f267d5..cb8fafdcf3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -191,7 +191,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. 
From 37f3f3f2db95539a9c84242936018908ffb3dd50 Mon Sep 17 00:00:00 2001 From: Simon Pasquier Date: Wed, 6 Nov 2024 14:57:14 +0100 Subject: [PATCH 0888/1397] Fix scrape failure logs Before this change, logs would show like: ``` {...,"target":"http://localhost:8080/metrics","!BADKEY":"Get ..."} ``` After this change ``` {...,"msg":"Get ...","job_name":...,"target":...} ``` Signed-off-by: Simon Pasquier --- scrape/scrape.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 7e270bb3a3..c5e3449820 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1421,7 +1421,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Error("err", scrapeErr) + sl.scrapeFailureLogger.Error(scrapeErr.Error()) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { From af1a19fc78ac7f7c3164bfb240d48d091f0ada1f Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 3 Nov 2024 13:15:51 +0100 Subject: [PATCH 0889/1397] enable errorf rule from perfsprint linter Signed-off-by: Matthieu MOREL --- .golangci.yml | 2 +- cmd/promtool/analyze.go | 8 ++++---- cmd/promtool/backfill.go | 2 +- cmd/promtool/main.go | 2 +- cmd/promtool/tsdb.go | 4 ++-- config/config.go | 4 ++-- discovery/aws/ec2.go | 2 +- discovery/aws/lightsail.go | 2 +- discovery/azure/azure.go | 2 +- discovery/consul/consul.go | 2 +- discovery/digitalocean/digitalocean.go | 3 ++- discovery/dns/dns.go | 2 +- discovery/dns/dns_test.go | 4 ++-- discovery/eureka/eureka.go | 3 +-- discovery/file/file.go | 2 +- discovery/gce/gce.go | 2 +- discovery/hetzner/hetzner.go | 2 +- discovery/http/http.go | 8 ++++---- discovery/ionos/ionos.go | 3 +-- discovery/kubernetes/kubernetes.go | 22 ++++++++++----------- discovery/linode/linode.go | 2 +- discovery/manager_test.go | 7 ++++--- discovery/marathon/marathon.go | 2 +- 
discovery/moby/docker.go | 5 +++-- discovery/moby/dockerswarm.go | 7 ++++--- discovery/nomad/nomad.go | 2 +- discovery/openstack/openstack.go | 2 +- discovery/ovhcloud/ovhcloud.go | 2 +- discovery/puppetdb/puppetdb.go | 11 ++++++----- discovery/refresh/refresh_test.go | 4 ++-- discovery/registry.go | 4 ++-- discovery/scaleway/scaleway.go | 2 +- discovery/triton/triton.go | 2 +- discovery/uyuni/uyuni.go | 2 +- discovery/vultr/vultr.go | 3 ++- discovery/xds/kuma.go | 3 ++- model/histogram/float_histogram.go | 15 +++++++------- model/histogram/histogram.go | 11 ++++++----- model/relabel/relabel.go | 5 +++-- model/rulefmt/rulefmt.go | 12 ++++++------ model/textparse/promparse.go | 2 +- promql/engine.go | 6 +++--- promql/info.go | 2 +- promql/promqltest/test.go | 2 +- rules/alerting.go | 3 ++- rules/recording.go | 3 ++- scrape/manager_test.go | 5 +++-- scrape/scrape_test.go | 6 +++--- storage/interface.go | 10 +++++----- storage/remote/azuread/azuread.go | 27 +++++++++++++------------- storage/remote/queue_manager_test.go | 2 +- storage/remote/write_handler_test.go | 4 ++-- tsdb/chunkenc/chunk_test.go | 2 +- tsdb/chunkenc/float_histogram.go | 7 ++++--- tsdb/chunkenc/histogram.go | 7 ++++--- tsdb/chunks/chunks.go | 7 ++++--- tsdb/compact.go | 2 +- tsdb/db.go | 4 ++-- tsdb/db_test.go | 5 +++-- tsdb/ooo_head_read_test.go | 2 +- tsdb/querier_test.go | 2 +- tsdb/tsdbblockutil.go | 3 ++- web/api/v1/api.go | 4 ++-- web/api/v1/api_test.go | 12 ++++++------ 64 files changed, 164 insertions(+), 149 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c512101e1b..dfc74139f9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -109,7 +109,7 @@ linters-settings: extra-rules: true perfsprint: # Optimizes `fmt.Errorf`. - errorf: false + errorf: true revive: # By default, revive will enable only the linting rules that are named in the configuration file. # So, it's needed to explicitly enable all required rules here. 
diff --git a/cmd/promtool/analyze.go b/cmd/promtool/analyze.go index c1f523de52..26e6f2188c 100644 --- a/cmd/promtool/analyze.go +++ b/cmd/promtool/analyze.go @@ -34,8 +34,8 @@ import ( ) var ( - errNotNativeHistogram = fmt.Errorf("not a native histogram") - errNotEnoughData = fmt.Errorf("not enough data") + errNotNativeHistogram = errors.New("not a native histogram") + errNotEnoughData = errors.New("not enough data") outputHeader = `Bucket stats for each histogram series over time ------------------------------------------------ @@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time) matrix, ok := values.(model.Matrix) if !ok { - return nil, fmt.Errorf("query of buckets resulted in non-Matrix") + return nil, errors.New("query of buckets resulted in non-Matrix") } return matrix, nil @@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int, prev := matrix[i].Values[timeIdx] // Assume the results are nicely aligned. 
if curr.Timestamp != prev.Timestamp { - return counts, fmt.Errorf("matrix result is not time aligned") + return counts, errors.New("matrix result is not time aligned") } counts[i+1] = int(curr.Value - prev.Value) } diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 1408975df9..125c9a08e6 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) { _, ts, _ := p.Series() if ts == nil { - return 0, 0, fmt.Errorf("expected timestamp for series got none") + return 0, 0, errors.New("expected timestamp for series got none") } if *ts > maxt { diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 49676ee5c4..b52fe7cdbb 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -444,7 +444,7 @@ func checkExperimental(f bool) { } } -var errLint = fmt.Errorf("lint error") +var errLint = errors.New("lint error") type lintConfig struct { all bool diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 727275aa6b..847ea6be0c 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb. histogramChunkSize = append(histogramChunkSize, len(chk.Bytes())) fhchk, ok := chk.(*chunkenc.FloatHistogramChunk) if !ok { - return fmt.Errorf("chunk is not FloatHistogramChunk") + return errors.New("chunk is not FloatHistogramChunk") } it := fhchk.Iterator(nil) bucketCount := 0 @@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb. 
histogramChunkSize = append(histogramChunkSize, len(chk.Bytes())) hchk, ok := chk.(*chunkenc.HistogramChunk) if !ok { - return fmt.Errorf("chunk is not HistogramChunk") + return errors.New("chunk is not HistogramChunk") } it := hchk.Iterator(nil) bucketCount := 0 diff --git a/config/config.go b/config/config.go index 30a74e0402..7fb77b0e6b 100644 --- a/config/config.go +++ b/config/config.go @@ -1072,7 +1072,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") } // Check for users putting URLs in target groups. @@ -1420,7 +1420,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { for i, attr := range c.PromoteResourceAttributes { attr = strings.TrimSpace(attr) if attr == "" { - err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute")) + err = errors.Join(err, errors.New("empty promoted OTel resource attribute")) continue } if _, exists := seen[attr]; exists { diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 5a725cb48f..0f35c401e6 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -161,7 +161,7 @@ type EC2Discovery struct { func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { m, ok := metrics.(*ec2Metrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 0b046be6d9..b892867f1b 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -134,7 +134,7 @@ type LightsailDiscovery 
struct { func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { m, ok := metrics.(*lightsailMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 35bbc3847c..ec1c51ace9 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -186,7 +186,7 @@ type Discovery struct { func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*azureMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index fcae7b186f..cb3dfe1373 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -189,7 +189,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*consulMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index 52f3a9c57a..fce8d1a354 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -15,6 +15,7 @@ package digitalocean import ( "context" + "errors" "fmt" "log/slog" "net" @@ -114,7 +115,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*digitaloceanMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := 
&Discovery{ diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 5de7f64886..405dba44f7 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -121,7 +121,7 @@ type Discovery struct { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dnsMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 96bb32491f..f01a075c45 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -15,7 +15,7 @@ package dns import ( "context" - "fmt" + "errors" "log/slog" "net" "testing" @@ -53,7 +53,7 @@ func TestDNS(t *testing.T) { Type: "A", }, lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") }, expected: []*targetgroup.Group{}, }, diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 5087346486..3cac667f85 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -16,7 +16,6 @@ package eureka import ( "context" "errors" - "fmt" "log/slog" "net" "net/http" @@ -129,7 +128,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*eurekaMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd") diff --git a/discovery/file/file.go b/discovery/file/file.go index 1c36b254cc..beea03222b 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -184,7 +184,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { 
fm, ok := metrics.(*fileMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index a509a144e1..9a5b0e856e 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -132,7 +132,7 @@ type Discovery struct { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*gceMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 980c197d77..02e2272999 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -138,7 +138,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*hetznerMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, logger) diff --git a/discovery/http/http.go b/discovery/http/http.go index 65404694c4..667fd36f6c 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.URL == "" { - return fmt.Errorf("URL is missing") + return errors.New("URL is missing") } parsedURL, err := url.Parse(c.URL) if err != nil { return err } if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { - return fmt.Errorf("URL scheme must be 'http' or 'https'") + return errors.New("URL scheme must be 'http' or 'https'") } if parsedURL.Host == "" { - return fmt.Errorf("host is missing in URL") + return errors.New("host is missing in URL") } return c.HTTPClientConfig.Validate() } @@ -118,7 +118,7 @@ type 
Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*httpMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index 1aa21667e3..1badda48cb 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -15,7 +15,6 @@ package ionos import ( "errors" - "fmt" "log/slog" "time" @@ -46,7 +45,7 @@ type Discovery struct{} func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ionosMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if conf.ionosEndpoint == "" { diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 64e8886cfd..9aff89e4fa 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.Role == "" { - return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") + return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") } err = c.HTTPClientConfig.Validate() if err != nil { @@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } if c.APIServer.URL != nil && c.KubeConfig != "" { // Api-server and kubeconfig_file are mutually exclusive - return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously") + return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously") } if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, 
config.DefaultHTTPClientConfig) { // Kubeconfig_file and custom http config are mutually exclusive - return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") + return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") } if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { - return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") + return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") } if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace { - return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") + return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") } if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace { - return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") + return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") } foundSelectorRoles := make(map[Role]struct{}) @@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string { func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { m, ok := metrics.(*kubernetesMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if l == nil { @@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde indexers[nodeIndex] = func(obj interface{}) ([]string, error) { pod, ok := obj.(*apiv1.Pod) if !ok { - return nil, fmt.Errorf("object is not a pod") + return nil, errors.New("object is not a pod") } return []string{pod.Spec.NodeName}, nil } @@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share 
indexers[podIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not endpoints") + return nil, errors.New("object is not endpoints") } var pods []string for _, target := range e.Subsets { @@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[nodeIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not endpoints") + return nil, errors.New("object is not endpoints") } var nodes []string for _, target := range e.Subsets { @@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object } } default: - return nil, fmt.Errorf("object is not an endpointslice") + return nil, errors.New("object is not an endpointslice") } return nodes, nil diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index dfc12417c0..90a91fc920 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -141,7 +141,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*linodeMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/manager_test.go b/discovery/manager_test.go index b882c0b02e..0ff82d5415 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -15,6 +15,7 @@ package discovery import ( "context" + "errors" "fmt" "sort" "strconv" @@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) { c := map[string]Configs{ "prometheus": { - errorConfig{fmt.Errorf("tests error 0")}, - errorConfig{fmt.Errorf("tests error 1")}, - errorConfig{fmt.Errorf("tests error 2")}, + errorConfig{errors.New("tests error 0")}, + errorConfig{errors.New("tests error 1")}, + 
errorConfig{errors.New("tests error 2")}, }, } discoveryManager.ApplyConfig(c) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index f81a4410eb..9c93e43f51 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -143,7 +143,7 @@ type Discovery struct { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*marathonMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd") diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 1a732c0502..13cf20d6dd 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -15,6 +15,7 @@ package moby import ( "context" + "errors" "fmt" "log/slog" "net" @@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error return err } if c.Host == "" { - return fmt.Errorf("host missing") + return errors.New("host missing") } if _, err = url.Parse(c.Host); err != nil { return err @@ -131,7 +132,7 @@ type DockerDiscovery struct { func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { m, ok := metrics.(*dockerMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &DockerDiscovery{ diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index 9e93e581f3..ba42253412 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -15,6 +15,7 @@ package moby import ( "context" + "errors" "fmt" "log/slog" "net/http" @@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e return err } if c.Host == "" { - return fmt.Errorf("host missing") + return 
errors.New("host missing") } if _, err = url.Parse(c.Host); err != nil { return err @@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e switch c.Role { case "services", "nodes", "tasks": case "": - return fmt.Errorf("role missing (one of: tasks, services, nodes)") + return errors.New("role missing (one of: tasks, services, nodes)") default: return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role) } @@ -128,7 +129,7 @@ type Discovery struct { func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*dockerswarmMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index 1dbd8f1608..7516308026 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -124,7 +124,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*nomadMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index fa7e0cce90..cd0bcc1267 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -145,7 +145,7 @@ type refresher interface { func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*openstackMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, l) diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 08ed70296b..a75e9694fe 100644 
--- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*ovhcloudMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf, logger) diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 6122a76da7..561bf78ba7 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -17,6 +17,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "log/slog" @@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.URL == "" { - return fmt.Errorf("URL is missing") + return errors.New("URL is missing") } parsedURL, err := url.Parse(c.URL) if err != nil { return err } if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { - return fmt.Errorf("URL scheme must be 'http' or 'https'") + return errors.New("URL scheme must be 'http' or 'https'") } if parsedURL.Host == "" { - return fmt.Errorf("host is missing in URL") + return errors.New("host is missing in URL") } if c.Query == "" { - return fmt.Errorf("query missing") + return errors.New("query missing") } return c.HTTPClientConfig.Validate() } @@ -142,7 +143,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*puppetdbMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if logger == nil { diff --git a/discovery/refresh/refresh_test.go b/discovery/refresh/refresh_test.go index b70a326355..0d4460ffa6 100644 --- a/discovery/refresh/refresh_test.go +++ 
b/discovery/refresh/refresh_test.go @@ -15,7 +15,7 @@ package refresh import ( "context" - "fmt" + "errors" "testing" "time" @@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) { case 2: return tg2, nil } - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") } interval := time.Millisecond diff --git a/discovery/registry.go b/discovery/registry.go index 1f491d4ca9..2401d78fba 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) { err := rmm.Register() if err != nil { - return nil, fmt.Errorf("failed to create service discovery refresh metrics") + return nil, errors.New("failed to create service discovery refresh metrics") } metrics := make(map[string]DiscovererMetrics) @@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm) err = currentSdMetrics.Register() if err != nil { - return nil, fmt.Errorf("failed to create service discovery metrics") + return nil, errors.New("failed to create service discovery metrics") } metrics[conf.Name()] = currentSdMetrics } diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index 670e439c4f..bc9282feaa 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -188,7 +188,7 @@ type Discovery struct{} func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { m, ok := metrics.(*scalewayMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } r, err := newRefresher(conf) diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 7b3b18f471..5ec7b65215 100644 --- 
a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -149,7 +149,7 @@ type Discovery struct { func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*tritonMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } tls, err := config.NewTLSConfig(&conf.TLSConfig) diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index de806895d7..1bd0cd2d49 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -215,7 +215,7 @@ func getEndpointInfoForSystems( func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*uyuniMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } apiURL, err := url.Parse(conf.Server) diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index f82b22168a..ee92f01699 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -15,6 +15,7 @@ package vultr import ( "context" + "errors" "fmt" "log/slog" "net" @@ -117,7 +118,7 @@ type Discovery struct { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { m, ok := metrics.(*vultrMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } d := &Discovery{ diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index 55b3d628e5..6208e6182a 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -14,6 +14,7 @@ package xds import ( + "errors" "fmt" "log/slog" "net/url" @@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) 
{ m, ok := metrics.(*xdsMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } // Default to "prometheus" if hostname is unavailable. diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index a6ad47acd3..e5519a56d6 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -14,6 +14,7 @@ package histogram import ( + "errors" "fmt" "math" "strings" @@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { - return fmt.Errorf("custom buckets: must have zero count of 0") + return errors.New("custom buckets: must have zero count of 0") } if h.ZeroThreshold != 0 { - return fmt.Errorf("custom buckets: must have zero threshold of 0") + return errors.New("custom buckets: must have zero threshold of 0") } if len(h.NegativeSpans) > 0 { - return fmt.Errorf("custom buckets: must not have negative spans") + return errors.New("custom buckets: must not have negative spans") } if len(h.NegativeBuckets) > 0 { - return fmt.Errorf("custom buckets: must not have negative buckets") + return errors.New("custom buckets: must not have negative buckets") } } else { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { @@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error { return fmt.Errorf("negative side: %w", err) } if h.CustomValues != nil { - return fmt.Errorf("histogram with exponential schema must not have custom bounds") + return errors.New("histogram with exponential schema must not have custom bounds") } } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) @@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, ) floatBucketIterator { if h.UsesCustomBuckets() && targetSchema != h.Schema { - panic(fmt.Errorf("cannot merge from custom 
buckets schema to exponential schema")) + panic(errors.New("cannot merge from custom buckets schema to exponential schema")) } if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { - panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + panic(errors.New("cannot merge from exponential buckets schema to custom schema")) } if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index e4b99ec420..778aefe282 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -14,6 +14,7 @@ package histogram import ( + "errors" "fmt" "math" "slices" @@ -432,16 +433,16 @@ func (h *Histogram) Validate() error { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { - return fmt.Errorf("custom buckets: must have zero count of 0") + return errors.New("custom buckets: must have zero count of 0") } if h.ZeroThreshold != 0 { - return fmt.Errorf("custom buckets: must have zero threshold of 0") + return errors.New("custom buckets: must have zero threshold of 0") } if len(h.NegativeSpans) > 0 { - return fmt.Errorf("custom buckets: must not have negative spans") + return errors.New("custom buckets: must not have negative spans") } if len(h.NegativeBuckets) > 0 { - return fmt.Errorf("custom buckets: must not have negative buckets") + return errors.New("custom buckets: must not have negative buckets") } } else { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { @@ -455,7 +456,7 @@ func (h *Histogram) Validate() error { return fmt.Errorf("negative side: %w", err) } if h.CustomValues != nil { - return fmt.Errorf("histogram with exponential schema must not have custom bounds") + return errors.New("histogram with exponential schema must not have custom bounds") } } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) diff --git 
a/model/relabel/relabel.go b/model/relabel/relabel.go index eb79f7be21..93331cf99f 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -16,6 +16,7 @@ package relabel import ( "crypto/md5" "encoding/binary" + "errors" "fmt" "strconv" "strings" @@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { func (c *Config) Validate() error { if c.Action == "" { - return fmt.Errorf("relabel action cannot be empty") + return errors.New("relabel action cannot be empty") } if c.Modulus == 0 && c.Action == HashMod { - return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") + return errors.New("relabel configuration for hashmod requires non-zero modulus") } if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index ef6ac17fe3..bb36a21208 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -184,14 +184,14 @@ type RuleNode struct { func (r *RuleNode) Validate() (nodes []WrappedError) { if r.Record.Value != "" && r.Alert.Value != "" { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("only one of 'record' and 'alert' must be set"), + err: errors.New("only one of 'record' and 'alert' must be set"), node: &r.Record, nodeAlt: &r.Alert, }) } if r.Record.Value == "" && r.Alert.Value == "" { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("one of 'record' or 'alert' must be set"), + err: errors.New("one of 'record' or 'alert' must be set"), node: &r.Record, nodeAlt: &r.Alert, }) @@ -199,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { if r.Expr.Value == "" { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("field 'expr' must be set in rule"), + err: errors.New("field 'expr' 
must be set in rule"), node: &r.Expr, }) } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil { @@ -211,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { if r.Record.Value != "" { if len(r.Annotations) > 0 { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("invalid field 'annotations' in recording rule"), + err: errors.New("invalid field 'annotations' in recording rule"), node: &r.Record, }) } if r.For != 0 { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("invalid field 'for' in recording rule"), + err: errors.New("invalid field 'for' in recording rule"), node: &r.Record, }) } if r.KeepFiringFor != 0 { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"), + err: errors.New("invalid field 'keep_firing_for' in recording rule"), node: &r.Record, }) } diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 0ab932c665..17b0c3db8b 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -509,7 +509,7 @@ func yoloString(b []byte) string { func parseFloat(s string) (float64, error) { // Keep to pre-Go 1.13 float formats. 
if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/promql/engine.go b/promql/engine.go index 56323748fe..cc7ff694e4 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2047,7 +2047,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, } for i := range mat { if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { - panic(fmt.Errorf("unexpected number of samples")) + panic(errors.New("unexpected number of samples")) } for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval { if len(mat[i].Floats) > 0 { @@ -3626,7 +3626,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) { if n, ok := node.(*parser.BinaryExpr); ok { detectHistogramStatsDecoding(n.LHS) detectHistogramStatsDecoding(n.RHS) - return fmt.Errorf("stop") + return errors.New("stop") } n, ok := (node).(*parser.VectorSelector) @@ -3648,7 +3648,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) { break } } - return fmt.Errorf("stop") + return errors.New("stop") }) } diff --git a/promql/info.go b/promql/info.go index 1a9f7eb18e..3fe9a2ce99 100644 --- a/promql/info.go +++ b/promql/info.go @@ -90,7 +90,7 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints { nodeTimestamp = n.Timestamp } offset = durationMilliseconds(n.OriginalOffset) - return fmt.Errorf("end traversal") + return errors.New("end traversal") default: return nil } diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index e078bcb60b..9fc31ca0b8 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -672,7 +672,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { switch val := result.(type) { case promql.Matrix: if ev.ordered { - return fmt.Errorf("expected ordered result, but query returned a matrix") + return errors.New("expected ordered result, but query returned 
a matrix") } if ev.expectScalar { diff --git a/rules/alerting.go b/rules/alerting.go index 7e74c176aa..e7f15baefe 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -15,6 +15,7 @@ package rules import ( "context" + "errors" "fmt" "log/slog" "net/url" @@ -403,7 +404,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t resultFPs[h] = struct{}{} if _, ok := alerts[h]; ok { - return nil, fmt.Errorf("vector contains metrics with the same labelset after applying alert labels") + return nil, errors.New("vector contains metrics with the same labelset after applying alert labels") } alerts[h] = &Alert{ diff --git a/rules/recording.go b/rules/recording.go index 17a75fdd1a..52c2a875ab 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -15,6 +15,7 @@ package rules import ( "context" + "errors" "fmt" "net/url" "time" @@ -103,7 +104,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration, // Check that the rule does not produce identical metrics after applying // labels. 
if vector.ContainsSameLabelset() { - return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels") + return nil, errors.New("vector contains metrics with the same labelset after applying rule labels") } numSeries := len(vector) diff --git a/scrape/manager_test.go b/scrape/manager_test.go index c3544f6344..f446c99789 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -16,6 +16,7 @@ package scrape import ( "bytes" "context" + "errors" "fmt" "net/http" "net/http/httptest" @@ -898,7 +899,7 @@ func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender if len(appender.resultFloats) > 0 { return nil } - return fmt.Errorf("expected some float samples, got none") + return errors.New("expected some float samples, got none") }), "after 1 minute") manager.Stop() } @@ -1061,7 +1062,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) { if len(app.resultHistograms) > 0 { return nil } - return fmt.Errorf("expected some histogram samples, got none") + return errors.New("expected some histogram samples, got none") }), "after 1 minute") scrapeManager.Stop() diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index f75e1db89a..02a31b7627 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1010,7 +1010,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second) - forcedErr := fmt.Errorf("forced err") + forcedErr := errors.New("forced err") sl.setForcedError(forcedErr) scraper.scrapeFunc = func(context.Context, io.Writer) error { @@ -1464,7 +1464,7 @@ func TestScrapeLoopCache(t *testing.T) { case 4: cancel() } - return fmt.Errorf("scrape failed") + return errors.New("scrape failed") } go func() { @@ -3264,7 +3264,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { numScrapes++ if numScrapes%4 == 0 { - 
return fmt.Errorf("scrape failed") + return errors.New("scrape failed") } w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")) return nil diff --git a/storage/interface.go b/storage/interface.go index 56bb53dfe0..32b90cc10a 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -41,17 +41,17 @@ var ( ErrOutOfOrderExemplar = errors.New("out of order exemplar") ErrDuplicateExemplar = errors.New("duplicate exemplar") ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) - ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") - ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") - ErrOOONativeHistogramsDisabled = fmt.Errorf("out-of-order native histogram ingestion is disabled") + ErrExemplarsDisabled = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0") + ErrNativeHistogramsDisabled = errors.New("native histograms are disabled") + ErrOOONativeHistogramsDisabled = errors.New("out-of-order native histogram ingestion is disabled") // ErrOutOfOrderCT indicates failed append of CT to the storage // due to CT being older the then newer sample. // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. - ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") - ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + ErrOutOfOrderCT = errors.New("created timestamp out of order, ignoring") + ErrCTNewerThanSample = errors.New("CT is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. 
In prometheus it is either a diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index 82f46b82f6..20ec53d6f6 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -16,7 +16,6 @@ package azuread import ( "context" "errors" - "fmt" "net/http" "strings" "sync" @@ -110,55 +109,55 @@ func (c *AzureADConfig) Validate() error { } if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic { - return fmt.Errorf("must provide a cloud in the Azure AD config") + return errors.New("must provide a cloud in the Azure AD config") } if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil { - return fmt.Errorf("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config") + return errors.New("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config") } if c.ManagedIdentity != nil && c.OAuth != nil { - return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config") + return errors.New("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config") } if c.ManagedIdentity != nil && c.SDK != nil { - return fmt.Errorf("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config") + return errors.New("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config") } if c.OAuth != nil && c.SDK != nil { - return fmt.Errorf("cannot provide both Azure OAuth and Azure SDK in the Azure AD config") + return errors.New("cannot provide both Azure OAuth and Azure SDK in the Azure AD config") } if c.ManagedIdentity != nil { if c.ManagedIdentity.ClientID == "" { - return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config") + return errors.New("must provide an Azure Managed Identity client_id in the Azure AD config") } _, err := uuid.Parse(c.ManagedIdentity.ClientID) if err != nil { - return fmt.Errorf("the provided 
Azure Managed Identity client_id is invalid") + return errors.New("the provided Azure Managed Identity client_id is invalid") } } if c.OAuth != nil { if c.OAuth.ClientID == "" { - return fmt.Errorf("must provide an Azure OAuth client_id in the Azure AD config") + return errors.New("must provide an Azure OAuth client_id in the Azure AD config") } if c.OAuth.ClientSecret == "" { - return fmt.Errorf("must provide an Azure OAuth client_secret in the Azure AD config") + return errors.New("must provide an Azure OAuth client_secret in the Azure AD config") } if c.OAuth.TenantID == "" { - return fmt.Errorf("must provide an Azure OAuth tenant_id in the Azure AD config") + return errors.New("must provide an Azure OAuth tenant_id in the Azure AD config") } var err error _, err = uuid.Parse(c.OAuth.ClientID) if err != nil { - return fmt.Errorf("the provided Azure OAuth client_id is invalid") + return errors.New("the provided Azure OAuth client_id is invalid") } _, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID) if err != nil { - return fmt.Errorf("the provided Azure OAuth tenant_id is invalid") + return errors.New("the provided Azure OAuth tenant_id is invalid") } } @@ -168,7 +167,7 @@ func (c *AzureADConfig) Validate() error { if c.SDK.TenantID != "" { _, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID) if err != nil { - return fmt.Errorf("the provided Azure OAuth tenant_id is invalid") + return errors.New("the provided Azure OAuth tenant_id is invalid") } } } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 4b7c5a4e90..21fdf92e36 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -763,7 +763,7 @@ func TestDisableReshardOnRetry(t *testing.T) { onStoreCalled() return WriteResponseStats{}, RecoverableError{ - error: fmt.Errorf("fake error"), + error: errors.New("fake error"), retryAfter: model.Duration(retryAfter), } }, diff --git 
a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 580c7c143e..c40f227ea8 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -672,7 +672,7 @@ func TestCommitErr_V1Message(t *testing.T) { req, err := http.NewRequest("", "", bytes.NewReader(payload)) require.NoError(t, err) - appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")} + appendable := &mockAppendable{commitErr: errors.New("commit error")} handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() @@ -696,7 +696,7 @@ func TestCommitErr_V2Message(t *testing.T) { req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) - appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")} + appendable := &mockAppendable{commitErr: errors.New("commit error")} handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) recorder := httptest.NewRecorder() diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index e6b89be401..a5e75ca32b 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -132,7 +132,7 @@ func TestPool(t *testing.T) { { name: "invalid encoding", encoding: EncNone, - expErr: fmt.Errorf(`invalid chunk encoding "none"`), + expErr: errors.New(`invalid chunk encoding "none"`), }, } { t.Run(tc.name, func(t *testing.T) { diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index f18eb77dad..1c7e2c3ace 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -15,6 +15,7 @@ package chunkenc import ( "encoding/binary" + "errors" "fmt" "math" @@ -761,9 +762,9 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend if !okToAppend || 
counterReset { if appendOnly { if counterReset { - return nil, false, a, fmt.Errorf("float histogram counter reset") + return nil, false, a, errors.New("float histogram counter reset") } - return nil, false, a, fmt.Errorf("float histogram schema change") + return nil, false, a, errors.New("float histogram schema change") } newChunk := NewFloatHistogramChunk() app, err := newChunk.Appender() @@ -812,7 +813,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h) if !okToAppend { if appendOnly { - return nil, false, a, fmt.Errorf("float gauge histogram schema change") + return nil, false, a, errors.New("float gauge histogram schema change") } newChunk := NewFloatHistogramChunk() app, err := newChunk.Appender() diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index f8796d64ec..bb747e135f 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -15,6 +15,7 @@ package chunkenc import ( "encoding/binary" + "errors" "fmt" "math" @@ -795,9 +796,9 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h if !okToAppend || counterReset { if appendOnly { if counterReset { - return nil, false, a, fmt.Errorf("histogram counter reset") + return nil, false, a, errors.New("histogram counter reset") } - return nil, false, a, fmt.Errorf("histogram schema change") + return nil, false, a, errors.New("histogram schema change") } newChunk := NewHistogramChunk() app, err := newChunk.Appender() @@ -846,7 +847,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h) if !okToAppend { if appendOnly { - return nil, false, a, fmt.Errorf("gauge histogram schema change") + return nil, false, a, errors.New("gauge 
histogram schema change") } newChunk := NewHistogramChunk() app, err := newChunk.Appender() diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index ec0f6d4036..f505d762bb 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -16,6 +16,7 @@ package chunks import ( "bufio" "encoding/binary" + "errors" "fmt" "hash" "hash/crc32" @@ -172,7 +173,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) { return emptyChunk, err } if newChunk != nil { - return emptyChunk, fmt.Errorf("did not expect to start a second chunk") + return emptyChunk, errors.New("did not expect to start a second chunk") } case chunkenc.ValFloatHistogram: newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false) @@ -180,7 +181,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) { return emptyChunk, err } if newChunk != nil { - return emptyChunk, fmt.Errorf("did not expect to start a second chunk") + return emptyChunk, errors.New("did not expect to start a second chunk") } default: panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) @@ -250,7 +251,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool { return cm.MinTime <= maxt && mint <= cm.MaxTime } -var errInvalidSize = fmt.Errorf("invalid size") +var errInvalidSize = errors.New("invalid size") var castagnoliTable *crc32.Table diff --git a/tsdb/compact.go b/tsdb/compact.go index ff35679e3f..17374531d4 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -184,7 +184,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.L func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { if len(ranges) == 0 { - return nil, fmt.Errorf("at least one range must be provided") + return nil, errors.New("at least one range must be provided") } if pool == nil { pool = chunkenc.NewPool() diff --git a/tsdb/db.go 
b/tsdb/db.go index bb9fe6ad7e..ab919c3100 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2004,10 +2004,10 @@ func (db *DB) ForceHeadMMap() { // will create a new block containing all data that's currently in the memory buffer/WAL. func (db *DB) Snapshot(dir string, withHead bool) error { if dir == db.dir { - return fmt.Errorf("cannot snapshot into base directory") + return errors.New("cannot snapshot into base directory") } if _, err := ulid.ParseStrict(dir); err == nil { - return fmt.Errorf("dir must not be a valid ULID") + return errors.New("dir must not be a valid ULID") } db.cmtx.Lock() diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 50f50a3a25..bfdf7aa4ad 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "flag" "fmt" "hash/crc32" @@ -1432,7 +1433,7 @@ func (*mockCompactorFailing) Plan(string) ([]string, error) { func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) { if len(c.blocks) >= c.max { - return []ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") + return []ulid.ULID{}, errors.New("the compactor already did the maximum allowed blocks so it is time to fail") } block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil) @@ -1459,7 +1460,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) ([]ulid.ULID, e } func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) { - return nil, fmt.Errorf("mock compaction failing CompactOOO") + return nil, errors.New("mock compaction failing CompactOOO") } func TestTimeRetention(t *testing.T) { diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 17f551dd7d..bc1cb67d1e 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -509,7 +509,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) 
{ Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, }) require.Nil(t, iterable) - require.Equal(t, err, fmt.Errorf("not found")) + require.EqualError(t, err, "not found") require.Nil(t, c) }) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index aca6c845b1..2d66102bfe 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3324,7 +3324,7 @@ func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]str } func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings { - return index.ErrPostings(fmt.Errorf("PostingsForLabelMatching called")) + return index.ErrPostings(errors.New("PostingsForLabelMatching called")) } func TestPostingsForMatcher(t *testing.T) { diff --git a/tsdb/tsdbblockutil.go b/tsdb/tsdbblockutil.go index b49757223f..af2348019a 100644 --- a/tsdb/tsdbblockutil.go +++ b/tsdb/tsdbblockutil.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "fmt" "log/slog" "path/filepath" @@ -23,7 +24,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" ) -var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") +var ErrInvalidTimes = errors.New("max time is lesser than min time") // CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk. 
func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index b37605f5d5..c4acafab6c 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1606,7 +1606,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe nextToken := r.URL.Query().Get("group_next_token") if nextToken != "" && maxGroups == "" { - errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token") + errResult := invalidParamError(errors.New("group_limit needs to be present in order to paginate over the groups"), "group_next_token") return -1, "", &errResult } @@ -1617,7 +1617,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe return -1, "", &errResult } if parsedMaxGroups <= 0 { - errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit") + errResult := invalidParamError(errors.New("group_limit needs to be greater than 0"), "group_limit") return -1, "", &errResult } } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 35ad4a9ad3..f5c81ebfa4 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -615,7 +615,7 @@ func TestGetSeries(t *testing.T) { matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, expectedErrorType: errorExec, api: &API{ - Queryable: errorTestQueryable{err: fmt.Errorf("generic")}, + Queryable: errorTestQueryable{err: errors.New("generic")}, }, }, { @@ -623,7 +623,7 @@ func TestGetSeries(t *testing.T) { matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, expectedErrorType: errorInternal, api: &API{ - Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}}, + Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}}, }, }, } { @@ -717,7 +717,7 @@ func TestQueryExemplars(t *testing.T) { name: "should return errorExec upon genetic 
error", expectedErrorType: errorExec, api: &API{ - ExemplarQueryable: errorTestQueryable{err: fmt.Errorf("generic")}, + ExemplarQueryable: errorTestQueryable{err: errors.New("generic")}, }, query: url.Values{ "query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`}, @@ -729,7 +729,7 @@ func TestQueryExemplars(t *testing.T) { name: "should return errorInternal err type is ErrStorage", expectedErrorType: errorInternal, api: &API{ - ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}}, + ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}}, }, query: url.Values{ "query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`}, @@ -838,7 +838,7 @@ func TestLabelNames(t *testing.T) { matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, expectedErrorType: errorExec, api: &API{ - Queryable: errorTestQueryable{err: fmt.Errorf("generic")}, + Queryable: errorTestQueryable{err: errors.New("generic")}, }, }, { @@ -846,7 +846,7 @@ func TestLabelNames(t *testing.T) { matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, expectedErrorType: errorInternal, api: &API{ - Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}}, + Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}}, }, }, } { From f42b37ff2fe56a92cfa1de64f814b5b5c9528e7d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 6 Nov 2024 15:51:39 +0000 Subject: [PATCH 0890/1397] [BUGFIX] TSDB: Fix race on stale values in headAppender (#15322) * [BUGFIX] TSDB: Fix race on stale values in headAppender Signed-off-by: Bryan Boreham * Simplify Signed-off-by: Bryan Boreham --------- Signed-off-by: Bryan Boreham --- tsdb/head_append.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 603b96cfcc..ea2a163f26 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -356,21 +356,21 @@ func (a *headAppender) 
Append(ref storage.SeriesRef, lset labels.Labels, t int64 } } + s.Lock() if value.IsStaleNaN(v) { - // This is not thread safe as we should be holding the lock for "s". // TODO(krajorama): reorganize Commit() to handle samples in append order // not floats first and then histograms. Then we could do this conversion // in commit. This code should move into Commit(). switch { case s.lastHistogramValue != nil: + s.Unlock() return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) case s.lastFloatHistogramValue != nil: + s.Unlock() return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) } } - s.Lock() - defer s.Unlock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. @@ -1517,7 +1517,7 @@ type chunkOpts struct { // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) -// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. +// Series lock must be held when calling. 
func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o) if !sampleInOrder { From 54a3f5054cfc90ce270d0268f9c70d442d0d1859 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Wed, 6 Nov 2024 16:20:45 +0000 Subject: [PATCH 0891/1397] docs: formatting and typo fixes to 3.0 migration guide Signed-off-by: Fiona Liao --- docs/migration.md | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index 43fc43df2a..8a78ee63ab 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -29,8 +29,8 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 `https://example.com/metrics` or `http://exmaple.com/metrics` to be represented as `https://example.com/metrics:443` and `http://example.com/metrics:80` respectively, add them to your target URLs - - `agent` - Instead use the dedicated `--agent` cli flag. + - `agent` + Instead use the dedicated `--agent` CLI flag. Prometheus v3 will log a warning if you continue to pass these to `--enable-feature`. @@ -55,37 +55,37 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 - The `.` pattern in regular expressions in PromQL matches newline characters. With this change a regular expressions like `.*` matches strings that include `\n`. This applies to matchers in queries and relabel configs. For example the - following regular expressions now match the accompanying strings, wheras in + following regular expressions now match the accompanying strings, whereas in Prometheus v2 these combinations didn't match. 
-| Regex | Additional matches | -| ----- | ------ | -| ".*" | "foo\n", "Foo\nBar" | -| "foo.?bar" | "foo\nbar" | -| "foo.+bar" | "foo\nbar" | + | Regex | Additional matches | + | ----- | ------ | + | ".*" | "foo\n", "Foo\nBar" | + | "foo.?bar" | "foo\nbar" | + | "foo.+bar" | "foo\nbar" | If you want Prometheus v3 to behave like v2 did, you will have to change your - regular expressions by replacing all `.` patterns with `[^\n]`, e.g. + regular expressions by replacing all `.` patterns with `[^\n]`, e.g. `foo[^\n]*`. - Lookback and range selectors are left open and right closed (previously left closed and right closed). This change affects queries when the evaluation time perfectly aligns with the sample timestamps. For example assume querying a - timeseries with even spaced samples exactly 1 minute apart. Before Prometheus - 3.x, range query with `5m` will mostly return 5 samples. But if the query + timeseries with evenly spaced samples exactly 1 minute apart. Before Prometheus + v3, a range query with `5m` would usually return 5 samples. But if the query evaluation aligns perfectly with a scrape, it would return 6 samples. In - Prometheus 3.x queries like this will always return 5 samples. - This change has likely few effects for everyday use, except for some sub query + Prometheus v3 queries like this will always return 5 samples. + This change has likely few effects for everyday use, except for some subquery use cases. - Query front-ends that align queries usually align sub-queries to multiples of - the step size. These sub queries will likely be affected. + Query front-ends that align queries usually align subqueries to multiples of + the step size. These subqueries will likely be affected. Tests are more likely to affected. To fix those either adjust the expected - number of samples or extend to range by less then one sample interval. + number of samples or extend the range by less than one sample interval. 
- The `holt_winters` function has been renamed to `double_exponential_smoothing` and is now guarded by the `promql-experimental-functions` feature flag. - If you want to keep using holt_winters, you have to do both of these things: - - Rename holt_winters to double_exponential_smoothing in your queries. + If you want to keep using `holt_winters`, you have to do both of these things: + - Rename `holt_winters` to `double_exponential_smoothing` in your queries. - Pass `--enable-feature=promql-experimental-functions` in your Prometheus - cli invocation.. + CLI invocation. ## Scrape protocols Prometheus v3 is more strict concerning the Content-Type header received when @@ -105,12 +105,12 @@ may now fail if this fallback protocol is not specified. ### TSDB format and downgrade The TSDB format has been changed in Prometheus v2.55 in preparation for changes -to the index format. Consequently a Prometheus v3 tsdb can only be read by a +to the index format. Consequently, a Prometheus v3 TSDB can only be read by a Prometheus v2.55 or newer. Before upgrading to Prometheus v3 please upgrade to v2.55 first and confirm Prometheus works as expected. Only then continue with the upgrade to v3. -### TSDB Storage contract +### TSDB storage contract TSDB compatible storage is now expected to return results matching the specified selectors. This might impact some third party implementations, most likely implementing `remote_read`. @@ -123,7 +123,7 @@ endpoints. Furthermore, metric and label names that would have previously been flagged as invalid no longer will be. 
Users wishing to preserve the original validation behavior can update their -prometheus yaml configuration to specify the legacy validation scheme: +Prometheus yaml configuration to specify the legacy validation scheme: ``` global: @@ -159,7 +159,7 @@ time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.co ### `le` and `quantile` label values In Prometheus v3, the values of the `le` label of classic histograms and the -`quantile` label of summaries are normalized upon ingestions. In Prometheus v2 +`quantile` label of summaries are normalized upon ingestion. In Prometheus v2 the value of these labels depended on the scrape protocol (protobuf vs text format) in some situations. This led to label values changing based on the scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be From 5c05c89b8b57a50ef59aedd7115b6b14d0b447e8 Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 5 Nov 2024 11:48:03 +0100 Subject: [PATCH 0892/1397] Update prometheus/common Signed-off-by: Julien --- go.mod | 2 +- go.sum | 4 ++-- util/logging/file_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 3399ffb002..596d1449bc 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.60.0 + github.com/prometheus/common v0.60.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.13.0 diff --git a/go.sum b/go.sum index 1dce748ba7..7c5dcfba82 100644 --- a/go.sum +++ b/go.sum @@ -513,8 +513,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= -github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= diff --git a/util/logging/file_test.go b/util/logging/file_test.go index 8ab4754339..00752df8db 100644 --- a/util/logging/file_test.go +++ b/util/logging/file_test.go @@ -40,7 +40,7 @@ func TestJSONFileLogger_basic(t *testing.T) { _, err = f.Read(r) require.NoError(t, err) - result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":\{.+\},"msg":"test","hello":"world"}\n`, r) + result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"file.go:\d+","msg":"test","hello":"world"}\n`, r) require.NoError(t, err) require.True(t, result, "unexpected content: %s", r) From 3aab02c63bc742ffa70539967439765fd622dc4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:08:53 +0000 Subject: [PATCH 0893/1397] chore(deps): bump github.com/prometheus/exporter-toolkit Bumps [github.com/prometheus/exporter-toolkit](https://github.com/prometheus/exporter-toolkit) from 0.13.0 to 0.13.1. 
- [Release notes](https://github.com/prometheus/exporter-toolkit/releases) - [Changelog](https://github.com/prometheus/exporter-toolkit/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/exporter-toolkit/compare/v0.13.0...v0.13.1) --- updated-dependencies: - dependency-name: github.com/prometheus/exporter-toolkit dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b1f18bd48e..f568fa0c69 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/prometheus/common v0.60.1 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.13.0 + github.com/prometheus/exporter-toolkit v0.13.1 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index 5c492d35d1..66086e22fd 100644 --- a/go.sum +++ b/go.sum @@ -525,8 +525,8 @@ github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/ github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= -github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= +github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04= +github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= From 6b09f094e120931a66defee73bdb913c28a34624 Mon Sep 17 00:00:00 2001 From: alexgreenbank Date: Thu, 7 Nov 2024 11:30:03 +0000 Subject: [PATCH 0894/1397] scrape: stop erroring on empty scrapes Signed-off-by: alexgreenbank --- scrape/scrape.go | 48 ++++++++++------- scrape/scrape_test.go | 119 +++++++++++++++++++++++++++--------------- 2 files changed, 108 insertions(+), 59 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 7e270bb3a3..a668eb465b 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1552,7 +1552,34 @@ type appendErrors struct { numExemplarOutOfOrder int } +// Update the stale markers. +func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) { + sl.cache.forEachStale(func(lset labels.Labels) bool { + // Series no longer exposed, mark it stale. + app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) + _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) + app.SetOptions(nil) + switch { + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + // Do not count these in logging, as this is expected if a target + // goes away and comes back again with a new scrape loop. + err = nil + } + return err == nil + }) + return +} + func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { + defTime := timestamp.FromTime(ts) + + if len(b) == 0 { + // Empty scrape. Just update the stale makers and swap the cache (but don't flush it). 
+ err = sl.updateStaleMarkers(app, defTime) + sl.cache.iterDone(false) + return + } + p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable) if p == nil { sl.l.Error( @@ -1574,9 +1601,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, "err", err, ) } - var ( - defTime = timestamp.FromTime(ts) appErrs = appendErrors{} sampleLimitErr error bucketLimitErr error @@ -1617,9 +1642,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, if err != nil { return } - // Only perform cache cleaning if the scrape was not empty. - // An empty scrape (usually) is used to indicate a failed scrape. - sl.cache.iterDone(len(b) > 0) + // Flush and swap the cache as the scrape was non-empty. + sl.cache.iterDone(true) }() loop: @@ -1862,19 +1886,7 @@ loop: sl.l.Warn("Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } if err == nil { - sl.cache.forEachStale(func(lset labels.Labels) bool { - // Series no longer exposed, mark it stale. - app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) - _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) - app.SetOptions(nil) - switch { - case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): - // Do not count these in logging, as this is expected if a target - // goes away and comes back again with a new scrape loop. 
- err = nil - } - return err == nil - }) + err = sl.updateStaleMarkers(app, defTime) } return } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index f75e1db89a..9ec1980677 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -120,13 +120,13 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde timestampInorder2 := now.Add(5 * time.Minute) slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", timestampInorder1) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", timestampInorder1) require.NoError(t, err) - _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "", timestampOutOfOrder) + _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder) require.NoError(t, err) - _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 3`), "", timestampInorder2) + _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 3`), "text/plain", timestampInorder2) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -756,6 +756,10 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { } func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop { + return newBasicScrapeLoopWithFallback(t, ctx, scraper, app, interval, "") +} + +func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration, fallback string) *scrapeLoop { return newScrapeLoop(ctx, scraper, nil, nil, @@ -783,7 +787,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app newTestScrapeMetrics(t), false, model.LegacyValidation, - "text/plain", + fallback, ) } @@ -844,7 +848,8 @@ func TestScrapeLoopStop(t *testing.T) { app = func(ctx context.Context) storage.Appender { return 
appender } ) - sl := newBasicScrapeLoop(t, context.Background(), scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, context.Background(), scraper, app, 10*time.Millisecond, "text/plain") // Terminate loop after 2 scrapes. numScrapes := 0 @@ -928,7 +933,7 @@ func TestScrapeLoopRun(t *testing.T) { scrapeMetrics, false, model.LegacyValidation, - "text/plain", + "", ) // The loop must terminate during the initial offset if the context @@ -1075,7 +1080,7 @@ func TestScrapeLoopMetadata(t *testing.T) { scrapeMetrics, false, model.LegacyValidation, - "text/plain", + "", ) defer cancel() @@ -1126,7 +1131,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { ctx, sl := simpleTestScrapeLoop(t) slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 1, total) @@ -1134,7 +1139,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { require.Equal(t, 1, seriesAdded) slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, slApp.Commit()) require.NoError(t, err) require.Equal(t, 1, total) @@ -1164,7 +1169,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { } slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{}) require.ErrorContains(t, err, "invalid metric name or label names") require.NoError(t, slApp.Rollback()) require.Equal(t, 1, total) @@ 
-1188,7 +1193,7 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { sl.validationScheme = model.LegacyValidation slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{}) require.ErrorContains(t, err, "invalid metric name or label names") require.NoError(t, slApp.Rollback()) require.Equal(t, 1, total) @@ -1199,7 +1204,7 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { sl.validationScheme = model.UTF8Validation slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.Equal(t, 1, total) require.Equal(t, 1, added) @@ -1229,7 +1234,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) { for i := 0; i < b.N; i++ { ts = ts.Add(time.Second) - _, _, _, _ = sl.append(slApp, metrics, "", ts) + _, _, _, _ = sl.append(slApp, metrics, "text/plain", ts) } } @@ -1338,7 +1343,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") // Succeed once, several failures, then stop. numScrapes := 0 @@ -1384,7 +1390,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. 
+ sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") // Succeed once, several failures, then stop. scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { @@ -1435,7 +1442,8 @@ func TestScrapeLoopCache(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) // Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps. // See https://github.com/prometheus/prometheus/issues/12727. - sl := newBasicScrapeLoop(t, ctx, scraper, app, 100*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 100*time.Millisecond, "text/plain") numScrapes := 0 @@ -1600,7 +1608,7 @@ func TestScrapeLoopAppend(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", now) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1681,7 +1689,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) } slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) + _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "text/plain", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1719,7 +1727,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, metric, "", now) + _, _, _, err := sl.append(slApp, metric, "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1756,7 +1764,7 
@@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "", now) + total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now) require.ErrorIs(t, err, errSampleLimit) require.NoError(t, slApp.Rollback()) require.Equal(t, 3, total) @@ -1785,7 +1793,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { now = time.Now() slApp = sl.appender(context.Background()) - total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "", now) + total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "text/plain", now) require.ErrorIs(t, err, errSampleLimit) require.NoError(t, slApp.Rollback()) require.Equal(t, 9, total) @@ -1913,12 +1921,12 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", now) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) slApp = sl.appender(context.Background()) - _, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "", now.Add(time.Minute)) + _, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "text/plain", now.Add(time.Minute)) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1937,6 +1945,33 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", 
appender) } +func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) { + app := &collectResultAppender{} + + // Explicitly setting the lack of fallback protocol here to make it obvious. + sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "") + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now) + // We expect the appropriate error. + require.ErrorContains(t, err, "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target", "Expected \"non-compliant scrape\" error but got: %s", err) +} + +func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) { + // This test ensures we there are no errors when we get a blank scrape or just want to append a stale marker. + app := &collectResultAppender{} + + // Explicitly setting the lack of fallback protocol here to make it obvious. + sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "") + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(""), "", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) +} + func TestScrapeLoopAppendStaleness(t *testing.T) { app := &collectResultAppender{} @@ -1944,7 +1979,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now) + _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1973,7 +2008,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) now := time.Now() slApp := 
sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -1999,7 +2034,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now) + _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2519,7 +2554,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T now := time.Unix(1, 0) slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "", now) + total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2550,7 +2585,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { now := time.Now().Add(20 * time.Minute) slApp := sl.appender(context.Background()) - total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "", now) + total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 1, total) @@ -2850,7 +2885,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { now := time.Now() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2877,7 +2912,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { now := time.Now() 
slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now) + _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2901,7 +2936,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { // We add a good and a bad metric to check that both are discarded. slApp := sl.appender(ctx) - _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{}) + _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "text/plain", time.Time{}) require.Error(t, err) require.NoError(t, slApp.Rollback()) // We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them, @@ -2916,7 +2951,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { // We add a good metric to check that it is recorded. 
slApp = sl.appender(ctx) - _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "", time.Time{}) + _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -2945,7 +2980,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { defer cancel() slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "", time.Time{}) + _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "text/plain", time.Time{}) require.Error(t, err) require.NoError(t, slApp.Rollback()) require.Equal(t, errNameLabelMandatory, err) @@ -3191,7 +3226,7 @@ func TestScrapeAddFast(t *testing.T) { defer cancel() slApp := sl.appender(ctx) - _, _, _, err := sl.append(slApp, []byte("up 1\n"), "", time.Time{}) + _, _, _, err := sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -3202,7 +3237,7 @@ func TestScrapeAddFast(t *testing.T) { } slApp = sl.appender(ctx) - _, _, _, err = sl.append(slApp, []byte("up 1\n"), "", time.Time{}.Add(time.Second)) + _, _, _, err = sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}.Add(time.Second)) require.NoError(t, err) require.NoError(t, slApp.Commit()) } @@ -3257,7 +3292,8 @@ func TestScrapeReportSingleAppender(t *testing.T) { ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, s.Appender, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. 
+ sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, s.Appender, 10*time.Millisecond, "text/plain") numScrapes := 0 @@ -3484,7 +3520,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { sl.labelLimits = &test.labelLimits slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now()) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", time.Now()) t.Logf("Test:%s", test.title) if test.expectErr { @@ -4176,7 +4212,8 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * ) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) + // Since we're writing samples directly below we need to provide a protocol fallback. + sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") sl.trackTimestampsStaleness = true // Succeed once, several failures, then stop. numScrapes := 0 @@ -4421,7 +4458,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { ctx, sl := simpleTestScrapeLoop(t) slApp := sl.appender(ctx) - total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "", time.Time{}) + total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 3, total) @@ -4430,7 +4467,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) 
require.Equal(t, 3, total) @@ -4440,7 +4477,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { // When different timestamps are supplied, multiple samples are accepted. slApp = sl.appender(ctx) - total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "", time.Time{}) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "text/plain", time.Time{}) require.NoError(t, err) require.NoError(t, slApp.Commit()) require.Equal(t, 3, total) From e7c0d21a8b9c3f407ed66c40775630961ffdc46d Mon Sep 17 00:00:00 2001 From: alexgreenbank Date: Thu, 7 Nov 2024 15:43:45 +0000 Subject: [PATCH 0895/1397] add CHANGELOG entry Signed-off-by: alexgreenbank --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88d24adeab..43c0aa371e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## unreleased +* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 + ## 3.0.0-rc.0 / 2024-10-31 * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. 
#15136 From f9bc50b24739235906b6f97223a7e5f668c90a9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=F0=9F=8C=B2=20Harry=20=F0=9F=8C=8A=20John=20=F0=9F=8F=94?= Date: Thu, 7 Nov 2024 08:52:55 -0800 Subject: [PATCH 0896/1397] storage: Implement limit in mergeGenericQuerier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 🌲 Harry 🌊 John 🏔 --- cmd/promtool/tsdb.go | 2 +- storage/merge.go | 104 +++++++++++++++++++++++------------------- storage/merge_test.go | 66 +++++++++++++++++++++++++-- tsdb/compact.go | 2 +- tsdb/querier_test.go | 2 +- web/api/v1/api.go | 2 +- web/federate.go | 2 +- 7 files changed, 123 insertions(+), 57 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 847ea6be0c..87ea6e5be0 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -733,7 +733,7 @@ func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt i for _, mset := range matcherSets { sets = append(sets, q.Select(ctx, true, nil, mset...)) } - ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + ss = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) } else { ss = q.Select(ctx, false, nil, matcherSets[0]...) } diff --git a/storage/merge.go b/storage/merge.go index a4d0934b16..1953d5df09 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "math" - "slices" "sync" "github.com/prometheus/prometheus/model/histogram" @@ -136,13 +135,17 @@ func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { // Select returns a set of series that matches the given label matchers. func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) + var limit int + if hints != nil { + limit = hints.Limit + } if !q.concurrentSelect { for _, querier := range q.queriers { // We need to sort for merge to work. 
seriesSets = append(seriesSets, querier.Select(ctx, true, hints, matchers...)) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { - s := newGenericMergeSeriesSet(seriesSets, q.mergeFn) + s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn) return s, s.Next() }} } @@ -175,7 +178,7 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints seriesSets = append(seriesSets, r) } return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { - s := newGenericMergeSeriesSet(seriesSets, q.mergeFn) + s := newGenericMergeSeriesSet(seriesSets, limit, q.mergeFn) return s, s.Next() }} } @@ -193,35 +196,44 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...) + res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) { + return q.LabelValues(ctx, name, hints, matchers...) + }) if err != nil { return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) } return res, ws, nil } -// lvals performs merge sort for LabelValues from multiple queriers. -func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +// mergeResults performs merge sort on the results of invoking the resultsFn against multiple queriers. 
+func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *LabelHints, resultsFn func(q LabelQuerier) ([]string, annotations.Annotations, error)) ([]string, annotations.Annotations, error) { if lq.Len() == 0 { return nil, nil, nil } if lq.Len() == 1 { - return lq.Get(0).LabelValues(ctx, n, hints, matchers...) + return resultsFn(lq.Get(0)) } a, b := lq.SplitByHalf() var ws annotations.Annotations - s1, w, err := q.lvals(ctx, a, n, hints, matchers...) + s1, w, err := q.mergeResults(a, hints, resultsFn) ws.Merge(w) if err != nil { return nil, ws, err } - s2, ws, err := q.lvals(ctx, b, n, hints, matchers...) + s2, w, err := q.mergeResults(b, hints, resultsFn) ws.Merge(w) if err != nil { return nil, ws, err } - return mergeStrings(s1, s2), ws, nil + + s1 = truncateToLimit(s1, hints) + s2 = truncateToLimit(s2, hints) + + merged := mergeStrings(s1, s2) + merged = truncateToLimit(merged, hints) + + return merged, ws, nil } func mergeStrings(a, b []string) []string { @@ -253,33 +265,13 @@ func mergeStrings(a, b []string) []string { // LabelNames returns all the unique label names present in all queriers in sorted order. func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - var ( - labelNamesMap = make(map[string]struct{}) - warnings annotations.Annotations - ) - for _, querier := range q.queriers { - names, wrn, err := querier.LabelNames(ctx, hints, matchers...) - if wrn != nil { - // TODO(bwplotka): We could potentially wrap warnings. - warnings.Merge(wrn) - } - if err != nil { - return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) - } - for _, name := range names { - labelNamesMap[name] = struct{}{} - } + res, ws, err := q.mergeResults(q.queriers, hints, func(q LabelQuerier) ([]string, annotations.Annotations, error) { + return q.LabelNames(ctx, hints, matchers...) 
+ }) + if err != nil { + return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) } - if len(labelNamesMap) == 0 { - return nil, warnings, nil - } - - labelNames := make([]string, 0, len(labelNamesMap)) - for name := range labelNamesMap { - labelNames = append(labelNames, name) - } - slices.Sort(labelNames) - return labelNames, warnings, nil + return res, ws, nil } // Close releases the resources of the generic querier. @@ -293,17 +285,25 @@ func (q *mergeGenericQuerier) Close() error { return errs.Err() } +func truncateToLimit(s []string, hints *LabelHints) []string { + if hints != nil && hints.Limit > 0 && len(s) > hints.Limit { + s = s[:hints.Limit] + } + return s +} + // VerticalSeriesMergeFunc returns merged series implementation that merges series with same labels together. // It has to handle time-overlapped series as well. type VerticalSeriesMergeFunc func(...Series) Series // NewMergeSeriesSet returns a new SeriesSet that merges many SeriesSets together. -func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) SeriesSet { +// If limit is set, the SeriesSet will be limited up-to the limit. 0 means disabled. 
+func NewMergeSeriesSet(sets []SeriesSet, limit int, mergeFunc VerticalSeriesMergeFunc) SeriesSet { genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericSeriesSetAdapter{s}) } - return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} + return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} } // VerticalChunkSeriesMergeFunc returns merged chunk series implementation that merges potentially time-overlapping @@ -313,12 +313,12 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri type VerticalChunkSeriesMergeFunc func(...ChunkSeries) ChunkSeries // NewMergeChunkSeriesSet returns a new ChunkSeriesSet that merges many SeriesSet together. -func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet { +func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, limit int, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet { genericSets := make([]genericSeriesSet, 0, len(sets)) for _, s := range sets { genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) } - return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} + return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, limit, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} } // genericMergeSeriesSet implements genericSeriesSet. 
@@ -326,9 +326,11 @@ type genericMergeSeriesSet struct { currentLabels labels.Labels mergeFunc genericSeriesMergeFunc - heap genericSeriesSetHeap - sets []genericSeriesSet - currentSets []genericSeriesSet + heap genericSeriesSetHeap + sets []genericSeriesSet + currentSets []genericSeriesSet + seriesLimit int + mergedSeries int // tracks the total number of series merged and returned. } // newGenericMergeSeriesSet returns a new genericSeriesSet that merges (and deduplicates) @@ -336,7 +338,8 @@ type genericMergeSeriesSet struct { // Each series set must return its series in labels order, otherwise // merged series set will be incorrect. // Overlapped situations are merged using provided mergeFunc. -func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMergeFunc) genericSeriesSet { +// If seriesLimit is set, only limited series are returned. +func newGenericMergeSeriesSet(sets []genericSeriesSet, seriesLimit int, mergeFunc genericSeriesMergeFunc) genericSeriesSet { if len(sets) == 1 { return sets[0] } @@ -356,13 +359,19 @@ func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMe } } return &genericMergeSeriesSet{ - mergeFunc: mergeFunc, - sets: sets, - heap: h, + mergeFunc: mergeFunc, + sets: sets, + heap: h, + seriesLimit: seriesLimit, } } func (c *genericMergeSeriesSet) Next() bool { + if c.seriesLimit > 0 && c.mergedSeries >= c.seriesLimit { + // Exit early if seriesLimit is set. + return false + } + // Run in a loop because the "next" series sets may not be valid anymore. // If, for the current label set, all the next series sets come from // failed remote storage sources, we want to keep trying with the next label set. 
@@ -393,6 +402,7 @@ func (c *genericMergeSeriesSet) Next() bool { break } } + c.mergedSeries++ return true } diff --git a/storage/merge_test.go b/storage/merge_test.go index b145743c86..04d4e92078 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1345,7 +1345,7 @@ func makeMergeSeriesSet(serieses [][]Series) SeriesSet { for i, s := range serieses { seriesSets[i] = &genericSeriesSetAdapter{NewMockSeriesSet(s...)} } - return &seriesSetAdapter{newGenericMergeSeriesSet(seriesSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: ChainedSeriesMerge}).Merge)} + return &seriesSetAdapter{newGenericMergeSeriesSet(seriesSets, 0, (&seriesMergerAdapter{VerticalSeriesMergeFunc: ChainedSeriesMerge}).Merge)} } func benchmarkDrain(b *testing.B, makeSeriesSet func() SeriesSet) { @@ -1390,6 +1390,34 @@ func BenchmarkMergeSeriesSet(b *testing.B) { } } +func BenchmarkMergeLabelValuesWithLimit(b *testing.B) { + var queriers []genericQuerier + + for i := 0; i < 5; i++ { + var lbls []string + for j := 0; j < 100000; j++ { + lbls = append(lbls, fmt.Sprintf("querier_%d_label_%d", i, j)) + } + q := &mockQuerier{resp: lbls} + queriers = append(queriers, newGenericQuerierFrom(q)) + } + + mergeQuerier := &mergeGenericQuerier{ + queriers: queriers, // Assume querying 5 blocks. 
+ mergeFn: func(l ...Labels) Labels { + return l[0] + }, + } + + b.Run("benchmark", func(b *testing.B) { + ctx := context.Background() + hints := &LabelHints{ + Limit: 1000, + } + mergeQuerier.LabelValues(ctx, "name", hints) + }) +} + func visitMockQueriers(t *testing.T, qr Querier, f func(t *testing.T, q *mockQuerier)) int { count := 0 switch x := qr.(type) { @@ -1428,6 +1456,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { name string primaries []Querier secondaries []Querier + limit int expectedSelectsSeries []labels.Labels expectedLabels []string @@ -1553,12 +1582,39 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { expectedLabels: []string{"a", "b"}, expectedWarnings: annotations.New().Add(warnStorage), }, + { + name: "successful queriers with limit", + primaries: []Querier{ + &mockQuerier{resp: []string{"a", "d"}, warnings: annotations.New().Add(warnStorage), err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b", "c"}, warnings: annotations.New().Add(warnStorage), err: nil}, + }, + limit: 2, + expectedSelectsSeries: []labels.Labels{ + labels.FromStrings("test", "a"), + labels.FromStrings("test", "b"), + }, + expectedLabels: []string{"a", "b"}, + expectedWarnings: annotations.New().Add(warnStorage), + }, } { + var labelHints *LabelHints + var selectHints *SelectHints + if tcase.limit > 0 { + labelHints = &LabelHints{ + Limit: tcase.limit, + } + selectHints = &SelectHints{ + Limit: tcase.limit, + } + } + t.Run(tcase.name, func(t *testing.T) { q := NewMergeQuerier(tcase.primaries, tcase.secondaries, func(s ...Series) Series { return s[0] }) t.Run("Select", func(t *testing.T) { - res := q.Select(context.Background(), false, nil) + res := q.Select(context.Background(), false, selectHints) var lbls []labels.Labels for res.Next() { lbls = append(lbls, res.At().Labels()) @@ -1577,7 +1633,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { require.Equal(t, 
len(tcase.primaries)+len(tcase.secondaries), n) }) t.Run("LabelNames", func(t *testing.T) { - res, w, err := q.LabelNames(ctx, nil) + res, w, err := q.LabelNames(ctx, labelHints) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") requireEqualSlice(t, tcase.expectedLabels, res) @@ -1590,7 +1646,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { }) }) t.Run("LabelValues", func(t *testing.T) { - res, w, err := q.LabelValues(ctx, "test", nil) + res, w, err := q.LabelValues(ctx, "test", labelHints) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") requireEqualSlice(t, tcase.expectedLabels, res) @@ -1604,7 +1660,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { }) t.Run("LabelValuesWithMatchers", func(t *testing.T) { matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") - res, w, err := q.LabelValues(ctx, "test2", nil, matcher) + res, w, err := q.LabelValues(ctx, "test2", labelHints, matcher) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") requireEqualSlice(t, tcase.expectedLabels, res) diff --git a/tsdb/compact.go b/tsdb/compact.go index 17374531d4..e27de9093b 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -831,7 +831,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if len(sets) > 1 { // Merge series using specified chunk series merger. // The default one is the compacting series merger. - set = storage.NewMergeChunkSeriesSet(sets, mergeFunc) + set = storage.NewMergeChunkSeriesSet(sets, 0, mergeFunc) } // Iterate over all sorted chunk series. 
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 2d66102bfe..04800fe9c4 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2030,7 +2030,7 @@ func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { // TODO(bwplotka): Merge with storage merged series set benchmark. func BenchmarkMergedSeriesSet(b *testing.B) { sel := func(sets []storage.SeriesSet) storage.SeriesSet { - return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + return storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) } for _, k := range []int{ diff --git a/web/api/v1/api.go b/web/api/v1/api.go index c4acafab6c..85d48f28ff 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -917,7 +917,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { s := q.Select(ctx, true, hints, mset...) sets = append(sets, s) } - set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + set = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) } else { // At this point at least one match exists. set = q.Select(ctx, false, hints, matcherSets[0]...) diff --git a/web/federate.go b/web/federate.go index 8e20a60f0f..a1513e46c1 100644 --- a/web/federate.go +++ b/web/federate.go @@ -100,7 +100,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) { sets = append(sets, s) } - set := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + set := storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge) it := storage.NewBuffer(int64(h.lookbackDelta / 1e6)) var chkIter chunkenc.Iterator Loop: From 3f6a133f2636a7ccb36efac50102dd7bd5f287f5 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 10 Apr 2024 10:38:15 +0100 Subject: [PATCH 0897/1397] [Test] Scraping: check reload metrics Need to extend `newTestScrapeMetrics`` to get at the Registry. `gatherLabels` function could go upstream to `client_golang`. 
Signed-off-by: Bryan Boreham --- scrape/scrape_test.go | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 02a31b7627..2aaf5e8e4f 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -65,10 +65,15 @@ func TestMain(m *testing.M) { testutil.TolerantVerifyLeak(m) } -func newTestScrapeMetrics(t testing.TB) *scrapeMetrics { +func newTestRegistryAndScrapeMetrics(t testing.TB) (*prometheus.Registry, *scrapeMetrics) { reg := prometheus.NewRegistry() metrics, err := newScrapeMetrics(reg) require.NoError(t, err) + return reg, metrics +} + +func newTestScrapeMetrics(t testing.TB) *scrapeMetrics { + _, metrics := newTestRegistryAndScrapeMetrics(t) return metrics } @@ -370,6 +375,7 @@ func TestScrapePoolReload(t *testing.T) { return l } + reg, metrics := newTestRegistryAndScrapeMetrics(t) sp := &scrapePool{ appendable: &nopAppendable{}, activeTargets: map[uint64]*Target{}, @@ -377,7 +383,7 @@ func TestScrapePoolReload(t *testing.T) { newLoop: newLoop, logger: nil, client: http.DefaultClient, - metrics: newTestScrapeMetrics(t), + metrics: metrics, symbolTable: labels.NewSymbolTable(), } @@ -432,6 +438,12 @@ func TestScrapePoolReload(t *testing.T) { require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly") require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload") + + got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds") + require.NoError(t, err) + expectedName, expectedValue := "interval", "3s" + require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got) + require.Equal(t, 1.0, prom_testutil.ToFloat64(sp.metrics.targetScrapePoolReloads)) } func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { @@ -447,6 +459,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { } return l } + reg, metrics := 
newTestRegistryAndScrapeMetrics(t) sp := &scrapePool{ appendable: &nopAppendable{}, activeTargets: map[uint64]*Target{ @@ -460,7 +473,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { newLoop: newLoop, logger: nil, client: http.DefaultClient, - metrics: newTestScrapeMetrics(t), + metrics: metrics, symbolTable: labels.NewSymbolTable(), } @@ -468,6 +481,30 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { if err != nil { t.Fatalf("unable to reload configuration: %s", err) } + // Check that the reload metric is labeled with the pool interval, not the overridden interval. + got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds") + require.NoError(t, err) + expectedName, expectedValue := "interval", "3s" + require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got) +} + +// Gather metrics from the provided Gatherer with specified familyName, +// and return all sets of name/value pairs. +func gatherLabels(g prometheus.Gatherer, familyName string) ([][]*dto.LabelPair, error) { + families, err := g.Gather() + if err != nil { + return nil, err + } + ret := make([][]*dto.LabelPair, 0) + for _, f := range families { + if f.GetName() == familyName { + for _, m := range f.GetMetric() { + ret = append(ret, m.GetLabel()) + } + break + } + } + return ret, nil } func TestScrapePoolTargetLimit(t *testing.T) { From 6380229c712aec0a17b38c60e7c79982f5bdc3d6 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 10 Apr 2024 15:39:17 +0100 Subject: [PATCH 0898/1397] [REFACTOR] Scraping: rename variables for clarity Instead of shadowing the outer variables, use different names. 
Signed-off-by: Bryan Boreham --- scrape/scrape.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index c5e3449820..d1b389a77c 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -350,12 +350,12 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { } t := sp.activeTargets[fp] - interval, timeout, err := t.intervalAndTimeout(interval, timeout) + targetInterval, targetTimeout, err := t.intervalAndTimeout(interval, timeout) var ( s = &targetScraper{ Target: t, client: sp.client, - timeout: timeout, + timeout: targetTimeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), @@ -373,8 +373,8 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { trackTimestampsStaleness: trackTimestampsStaleness, mrc: mrc, cache: cache, - interval: interval, - timeout: timeout, + interval: targetInterval, + timeout: targetTimeout, validationScheme: validationScheme, fallbackScrapeProtocol: fallbackScrapeProtocol, }) From 405b088d6df572ed87131b6997ede48d0f462089 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:50:27 +0000 Subject: [PATCH 0899/1397] chore(deps): bump actions/upload-artifact from 4.4.0 to 4.4.3 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.4.0 to 4.4.3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/50769540e7f4bd5e21e526ee35c689e35e0d6874...b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/fuzzing.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 80356e45bf..5f1b0f25ce 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 440ac8a734..4586f4617e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3 with: name: SARIF file path: results.sarif From 03995775a7674a0f571abf667b82e07b9d48df24 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Fri, 8 Nov 2024 10:02:48 -0300 Subject: [PATCH 0900/1397] Allow UTF-8 characters in metric and label names as opt-in feature (#15258) * Allow UTF-8 characters in metric and label names as opt-in feature --------- Signed-off-by: Arthur Silva Sens --- config/config.go | 29 ++- config/config_test.go | 62 +++++++ config/testdata/otlp_allow_utf8.bad.yml | 4 + config/testdata/otlp_allow_utf8.good.yml | 2 + .../testdata/otlp_allow_utf8.incompatible.yml | 4 + documentation/examples/prometheus-otlp.yml | 31 ++++ .../prometheus/helpers_from_stdlib.go | 106 +++++++++++ .../prometheus/normalize_label.go | 4 +- .../prometheus/normalize_label_test.go | 27 +-- .../prometheus/normalize_name.go | 60 ++++--- .../prometheus/normalize_name_test.go | 165 ++++++++++-------- .../prometheusremotewrite/helper.go | 6 +- .../prometheusremotewrite/histograms_test.go | 2 +- .../prometheusremotewrite/metrics_to_prw.go | 3 +- .../otlp_to_openmetrics_metadata.go | 4 +- storage/remote/write_handler.go | 1 + 16 files changed, 396 insertions(+), 114 deletions(-) create mode 100644 config/testdata/otlp_allow_utf8.bad.yml create mode 100644 config/testdata/otlp_allow_utf8.good.yml create mode 100644 config/testdata/otlp_allow_utf8.incompatible.yml create mode 100644 documentation/examples/prometheus-otlp.yml create mode 100644 storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go diff --git a/config/config.go b/config/config.go index 7fb77b0e6b..ef3ea5e67e 100644 --- a/config/config.go +++ b/config/config.go @@ -106,6 +106,18 @@ func Load(s string, logger *slog.Logger) (*Config, error) { if !b.Labels().IsEmpty() { cfg.GlobalConfig.ExternalLabels = 
b.Labels() } + + switch cfg.OTLPConfig.TranslationStrategy { + case UnderscoreEscapingWithSuffixes: + case "": + case NoUTF8EscapingWithSuffixes: + if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig { + return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled") + } + default: + return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) + } + return cfg, nil } @@ -239,7 +251,9 @@ var ( } // DefaultOTLPConfig is the default OTLP configuration. - DefaultOTLPConfig = OTLPConfig{} + DefaultOTLPConfig = OTLPConfig{ + TranslationStrategy: UnderscoreEscapingWithSuffixes, + } ) // Config is the top-level configuration for Prometheus's config files. @@ -1402,9 +1416,20 @@ func getGoGCEnv() int { return DefaultRuntimeConfig.GoGC } +type translationStrategyOption string + +var ( + // NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added. + NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" + // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. + // This option will translate all UTF-8 characters to underscores, while adding units and type suffixes. + UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" +) + // OTLPConfig is the configuration for writing to the OTLP endpoint. type OTLPConfig struct { - PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
diff --git a/config/config_test.go b/config/config_test.go index c3148f93a7..77cbf9b2eb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -168,6 +168,7 @@ var expectedConf = &Config{ PromoteResourceAttributes: []string{ "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", }, + TranslationStrategy: UnderscoreEscapingWithSuffixes, }, RemoteReadConfigs: []*RemoteReadConfig{ @@ -1553,6 +1554,67 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { }) } +func TestOTLPAllowUTF8(t *testing.T) { + t.Run("good config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml") + verify := func(t *testing.T, conf *Config, err error) { + t.Helper() + require.NoError(t, err) + require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy) + } + + t.Run("LoadFile", func(t *testing.T) { + conf, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, conf, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + conf, err := Load(string(content), promslog.NewNopLogger()) + verify(t, conf, err) + }) + }) + + t.Run("incompatible config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + t.Log("err", err) + verify(t, err) + }) + }) + + t.Run("bad config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `unsupported 
OTLP translation strategy "Invalid"`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + verify(t, err) + }) + }) +} + func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. diff --git a/config/testdata/otlp_allow_utf8.bad.yml b/config/testdata/otlp_allow_utf8.bad.yml new file mode 100644 index 0000000000..488e4b0558 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.bad.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: Invalid diff --git a/config/testdata/otlp_allow_utf8.good.yml b/config/testdata/otlp_allow_utf8.good.yml new file mode 100644 index 0000000000..f3069d2fdd --- /dev/null +++ b/config/testdata/otlp_allow_utf8.good.yml @@ -0,0 +1,2 @@ +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/config/testdata/otlp_allow_utf8.incompatible.yml b/config/testdata/otlp_allow_utf8.incompatible.yml new file mode 100644 index 0000000000..2625c24131 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.incompatible.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/documentation/examples/prometheus-otlp.yml b/documentation/examples/prometheus-otlp.yml new file mode 100644 index 0000000000..f0a8ab8b11 --- /dev/null +++ b/documentation/examples/prometheus-otlp.yml @@ -0,0 +1,31 @@ +# my global config +global: + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + +otlp: + # Recommended attributes to be promoted to labels. 
+ promote_resource_attributes: + - service.instance.id + - service.name + - service.namespace + - cloud.availability_zone + - cloud.region + - container.name + - deployment.environment.name + - k8s.cluster.name + - k8s.container.name + - k8s.cronjob.name + - k8s.daemonset.name + - k8s.deployment.name + - k8s.job.name + - k8s.namespace.name + - k8s.pod.name + - k8s.replicaset.name + - k8s.statefulset.name + # Ingest OTLP data keeping UTF-8 characters in metric/label names. + translation_strategy: NoUTF8EscapingWithSuffixes + +storage: + # OTLP is a push-based protocol, Out of order samples is a common scenario. + tsdb: + out_of_order_time_window: 30m diff --git a/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go new file mode 100644 index 0000000000..cb9257d073 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go @@ -0,0 +1,106 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go +// Provenance-includes-license: BSD-3-Clause +// Provenance-includes-copyright: Copyright The Go Authors. + +package prometheus + +import "strings" + +// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library, +// but it also returns the separators as part of the result. 
+func fieldsFunc(s string, f func(rune) bool) ([]string, []string) { + // A span is used to record a slice of s of the form s[start:end]. + // The start index is inclusive and the end index is exclusive. + type span struct { + start int + end int + } + spans := make([]span, 0, 32) + separators := make([]string, 0, 32) + + // Find the field start and end indices. + // Doing this in a separate pass (rather than slicing the string s + // and collecting the result substrings right away) is significantly + // more efficient, possibly due to cache effects. + start := -1 // valid span start if >= 0 + for end, rune := range s { + if f(rune) { + if start >= 0 { + spans = append(spans, span{start, end}) + // Set start to a negative value. + // Note: using -1 here consistently and reproducibly + // slows down this code by a several percent on amd64. + start = ^start + separators = append(separators, string(s[end])) + } + } else { + if start < 0 { + start = end + } + } + } + + // Last field might end at EOF. + if start >= 0 { + spans = append(spans, span{start, len(s)}) + } + + // Create strings from recorded field indices. + a := make([]string, len(spans)) + for i, span := range spans { + a[i] = s[span.start:span.end] + } + + return a, separators +} + +// join is a copy of strings.Join from the Go standard library, +// but it also accepts a slice of separators to join the elements with. +// If the slice of separators is shorter than the slice of elements, use a default value. +// We also don't check for integer overflow. 
+func join(elems []string, separators []string, def string) string { + switch len(elems) { + case 0: + return "" + case 1: + return elems[0] + } + + var n int + var sep string + sepLen := len(separators) + for i, elem := range elems { + if i >= sepLen { + sep = def + } else { + sep = separators[i] + } + n += len(sep) + len(elem) + } + + var b strings.Builder + b.Grow(n) + b.WriteString(elems[0]) + for i, s := range elems[1:] { + if i >= sepLen { + sep = def + } else { + sep = separators[i] + } + b.WriteString(sep) + b.WriteString(s) + } + return b.String() +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index d5de2c7651..b928e6888d 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -29,9 +29,9 @@ import ( // // Labels that start with non-letter rune will be prefixed with "key_". // An exception is made for double-underscores which are allowed. 
-func NormalizeLabel(label string) string { +func NormalizeLabel(label string, allowUTF8 bool) string { // Trivial case - if len(label) == 0 { + if len(label) == 0 || allowUTF8 { return label } diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go index 21d4d6a6d8..19ab6cd173 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label_test.go @@ -22,24 +22,27 @@ import ( func TestNormalizeLabel(t *testing.T) { tests := []struct { - label string - expected string + label string + expected string + expectedUTF8 string }{ - {"", ""}, - {"label:with:colons", "label_with_colons"}, // Without UTF-8 support, colons are only allowed in metric names - {"LabelWithCapitalLetters", "LabelWithCapitalLetters"}, - {"label!with&special$chars)", "label_with_special_chars_"}, - {"label_with_foreign_characters_字符", "label_with_foreign_characters___"}, - {"label.with.dots", "label_with_dots"}, - {"123label", "key_123label"}, - {"_label_starting_with_underscore", "key_label_starting_with_underscore"}, - {"__label_starting_with_2underscores", "__label_starting_with_2underscores"}, + {"", "", ""}, + {"label:with:colons", "label_with_colons", "label:with:colons"}, // Without UTF-8 support, colons are only allowed in metric names + {"LabelWithCapitalLetters", "LabelWithCapitalLetters", "LabelWithCapitalLetters"}, + {"label!with&special$chars)", "label_with_special_chars_", "label!with&special$chars)"}, + {"label_with_foreign_characters_字符", "label_with_foreign_characters___", "label_with_foreign_characters_字符"}, + {"label.with.dots", "label_with_dots", "label.with.dots"}, + {"123label", "key_123label", "123label"}, + {"_label_starting_with_underscore", "key_label_starting_with_underscore", "_label_starting_with_underscore"}, + {"__label_starting_with_2underscores", "__label_starting_with_2underscores", 
"__label_starting_with_2underscores"}, } for i, test := range tests { t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { - result := NormalizeLabel(test.label) + result := NormalizeLabel(test.label, false) require.Equal(t, test.expected, result) + uTF8result := NormalizeLabel(test.label, true) + require.Equal(t, test.expectedUTF8, uTF8result) }) } } diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 0119b64dff..335705aa8d 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -88,27 +88,32 @@ var perUnitMap = map[string]string{ // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming // and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string { // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { - return normalizeName(metric, namespace) + return normalizeName(metric, namespace, allowUTF8) } - // Regexp for metric name characters that should be replaced with _. - invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) + var metricName string + if !allowUTF8 { + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) - // Simple case (no full normalization, no units, etc.). 
- metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") + // Simple case (no full normalization, no units, etc.). + metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") + } else { + metricName = metric.Name() + } // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + // Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore. + if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 { metricName = "_" + metricName } @@ -116,17 +121,18 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix } // Build a normalized name for the specified metric. -func normalizeName(metric pmetric.Metric, namespace string) string { - // Regexp for characters that can't be in a metric name token. - nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) - +func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { + var translationFunc func(rune) bool + if !allowUTF8 { + nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) + translationFunc = func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) } + } else { + translationFunc = func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } + } // Split metric name into "tokens" (of supported metric name runes). // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
- nameTokens := strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }, - ) + nameTokens, separators := fieldsFunc(metric.Name(), translationFunc) // Split unit at the '/' if any unitTokens := strings.SplitN(metric.Unit(), "/", 2) @@ -137,7 +143,10 @@ func normalizeName(metric pmetric.Metric, namespace string) string { var mainUnitProm, perUnitProm string mainUnitOTel := strings.TrimSpace(unitTokens[0]) if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm = cleanUpUnit(unitMapGetOrDefault(mainUnitOTel)) + mainUnitProm = unitMapGetOrDefault(mainUnitOTel) + if !allowUTF8 { + mainUnitProm = cleanUpUnit(mainUnitProm) + } if slices.Contains(nameTokens, mainUnitProm) { mainUnitProm = "" } @@ -148,7 +157,10 @@ func normalizeName(metric pmetric.Metric, namespace string) string { if len(unitTokens) > 1 && unitTokens[1] != "" { perUnitOTel := strings.TrimSpace(unitTokens[1]) if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm = cleanUpUnit(perUnitMapGetOrDefault(perUnitOTel)) + perUnitProm = perUnitMapGetOrDefault(perUnitOTel) + if !allowUTF8 { + perUnitProm = cleanUpUnit(perUnitProm) + } } if perUnitProm != "" { perUnitProm = "per_" + perUnitProm @@ -189,8 +201,12 @@ func normalizeName(metric pmetric.Metric, namespace string) string { nameTokens = append([]string{namespace}, nameTokens...) } - // Build the string from the tokens, separated with underscores - normalizedName := strings.Join(nameTokens, "_") + // Build the string from the tokens + separators. + // If UTF-8 isn't allowed, we'll use underscores as separators. 
+ if !allowUTF8 { + separators = []string{} + } + normalizedName := join(nameTokens, separators, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 2d5648e84c..d97e7a560a 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -25,92 +25,119 @@ import ( ) func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "", false)) } func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) - require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "", false)) } func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "", false)) } func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false)) } func TestNonStandardUnitCounter(t *testing.T) 
{ - require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "", false)) } func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "", false)) } func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "", false)) } func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", 
normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) - require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) - require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "", false)) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "", false)) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "", false)) } func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "", false)) } func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) - require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false)) + require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false)) } func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) - require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "", false)) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "", false)) } func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", 
normalizeName(createGauge("test.metric.no_unit", ""), "")) - require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "", false)) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "", false)) } -func TestUnsupportedRunes(t *testing.T) { - require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) - require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) - require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) +func TestAllowUTF8(t *testing.T) { + t.Run("allow UTF8", func(t *testing.T) { + require.Equal(t, "unsupported.metric.temperature_°F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", true)) + require.Equal(t, "unsupported.metric.weird_+=.:,!* & #", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", true)) + require.Equal(t, "unsupported.metric.redundant___test $_per_°C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", true)) + require.Equal(t, "metric_with_字符_foreign_characters_ど", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", true)) + }) + t.Run("disallow UTF8", func(t *testing.T) { + require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false)) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", false)) + require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false)) + require.Equal(t, "metric_with_foreign_characters", 
normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false)) + }) +} + +func TestAllowUTF8KnownBugs(t *testing.T) { + // Due to historical reasons, the translator code was copied from OpenTelemetry collector codebase. + // Over there, they tried to provide means to translate metric names following Prometheus conventions that are documented here: + // https://prometheus.io/docs/practices/naming/ + // + // Although not explicitly stated, it was implied that words should be separated by a single underscore and the codebase was written + // with that in mind. + // + // Now that we're allowing OTel users to have their original names stored in prometheus without any transformation, we're facing problems + // where two (or more) UTF-8 characters are being used to separate words. + // TODO(arthursens): Fix it! + + // We're asserting on 'NotEqual', which proves the bug. + require.NotEqual(t, "metric....split_=+by_//utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) + // Here we're asserting on 'Equal', showing the current behavior. 
+ require.Equal(t, "metric.split_by_utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) } func TestOTelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) - require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) - require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) - require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) - require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) - require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) - require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) - require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) - require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) - require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) - require.Equal(t, "memcached_operation_hit_ratio_percent", 
normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) - require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) - require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) - require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) - require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) - require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) - require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) - require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) - require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "", false)) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "", false)) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "", false)) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", 
normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "", false)) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "", false)) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "", false)) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "", false)) + require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "", false)) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "", false)) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "", false)) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "", false)) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "", false)) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "", false)) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "", false)) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "", false)) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "", false)) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "", false)) + require.Equal(t, 
"mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "", false)) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "", false)) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "", false)) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "", false)) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false)) } func TestTrimPromSuffixes(t *testing.T) { @@ -144,8 +171,8 @@ func TestTrimPromSuffixes(t *testing.T) { } func TestNamespace(t *testing.T) { - require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) - require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false)) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false)) } func TestCleanUpUnit(t *testing.T) { @@ -180,28 +207,28 @@ func TestRemoveItem(t *testing.T) { } func TestBuildCompliantNameWithSuffixes(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true)) - require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) - require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) - require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) + require.Equal(t, 
"system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true, false)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true, false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true, false)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true, false)) + require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true, false)) // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) + require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true, false)) // Slashes in units are converted. - require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true)) - require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) + require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true, false)) + require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true, false)) } func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false)) - require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false)) - require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false)) - 
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) - require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) - require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false, false)) + require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false, false)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false, false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false, false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false, false)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false, false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false, false)) + require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false, false)) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index f7fede258b..30cfa86436 100644 --- 
a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -157,7 +157,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting // map ensures no duplicate label names. l := make(map[string]string, maxLabelCount) for _, label := range labels { - var finalKey = prometheustranslator.NormalizeLabel(label.Name) + var finalKey = prometheustranslator.NormalizeLabel(label.Name, settings.AllowUTF8) if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value } else { @@ -166,7 +166,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } for _, lbl := range promotedAttrs { - normalized := prometheustranslator.NormalizeLabel(lbl.Name) + normalized := prometheustranslator.NormalizeLabel(lbl.Name, settings.AllowUTF8) if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value } @@ -205,7 +205,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } // internal labels should be maintained if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { - name = prometheustranslator.NormalizeLabel(name) + name = prometheustranslator.NormalizeLabel(name, settings.AllowUTF8) } l[name] = extras[i+1] } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index 5fdd26ef29..dcd83b7f93 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -762,7 +762,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { Settings{ ExportCreatedMetric: true, }, - prometheustranslator.BuildCompliantName(metric, "", true), + prometheustranslator.BuildCompliantName(metric, "", true, true), ) require.NoError(t, err) require.Empty(t, annots) 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 0afd2ad57e..4f8baf3100 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -38,6 +38,7 @@ type Settings struct { ExportCreatedMetric bool AddMetricSuffixes bool SendMetadata bool + AllowUTF8 bool PromoteResourceAttributes []string } @@ -84,7 +85,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) // handle individual metrics based on type //exhaustive:enforce diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index ba48704193..b423d2cc6e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -43,7 +43,7 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta return prompb.MetricMetadata_UNKNOWN } -func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb.MetricMetadata { +func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes, allowUTF8 bool) []*prompb.MetricMetadata { resourceMetricsSlice := md.ResourceMetrics() metadataLength := 0 @@ -65,7 +65,7 @@ func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb metric := scopeMetrics.Metrics().At(k) entry := prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: 
prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes), + MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes, allowUTF8), Help: metric.Description(), } metadata = append(metadata, &entry) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 466673c99d..87102a374b 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -513,6 +513,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { converter := otlptranslator.NewPrometheusConverter() annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{ AddMetricSuffixes: true, + AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, }) if err != nil { From 5e8de28aec9a8aa572417f60e8012d0a6b7f1234 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Fri, 8 Nov 2024 15:31:03 +0100 Subject: [PATCH 0901/1397] Update golangci-lint Update to the latest golangci-lint version. Signed-off-by: SuperQ --- .github/workflows/ci.yml | 2 +- scripts/golangci-lint.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fcd0d91eea..b7074a887e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -195,7 +195,7 @@ jobs: with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. 
- version: v1.60.2 + version: v1.61.0 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 7af9bba77b..3051469937 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -36,4 +36,4 @@ jobs: uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: args: --verbose - version: v1.60.2 + version: v1.61.0 From 650abacccebedd0fac317550075870a8748436e0 Mon Sep 17 00:00:00 2001 From: Aniket Kaulavkar Date: Fri, 8 Nov 2024 21:38:27 +0530 Subject: [PATCH 0902/1397] Parallelize tests in cmd/promtool/ Signed-off-by: Aniket Kaulavkar --- cmd/promtool/analyze_test.go | 4 ++++ cmd/promtool/backfill_test.go | 2 ++ cmd/promtool/main_test.go | 22 ++++++++++++++++++++++ cmd/promtool/rules_test.go | 4 ++++ cmd/promtool/sd_test.go | 1 + cmd/promtool/tsdb_test.go | 1 + cmd/promtool/unittest_test.go | 5 +++++ 7 files changed, 39 insertions(+) diff --git a/cmd/promtool/analyze_test.go b/cmd/promtool/analyze_test.go index 83d2ac4a3d..1e16a22d35 100644 --- a/cmd/promtool/analyze_test.go +++ b/cmd/promtool/analyze_test.go @@ -109,6 +109,7 @@ func init() { } func TestGetBucketCountsAtTime(t *testing.T) { + t.Parallel() cases := []struct { matrix model.Matrix length int @@ -137,6 +138,7 @@ func TestGetBucketCountsAtTime(t *testing.T) { for _, c := range cases { t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) { + t.Parallel() res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx) require.NoError(t, err) require.Equal(t, c.expected, res) @@ -145,6 +147,7 @@ func TestGetBucketCountsAtTime(t *testing.T) { } func TestCalcClassicBucketStatistics(t *testing.T) { + t.Parallel() cases := []struct { matrix model.Matrix expected *statistics @@ -162,6 +165,7 @@ func TestCalcClassicBucketStatistics(t *testing.T) { for i, c := range cases { t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + t.Parallel() res, err := 
calcClassicBucketStatistics(c.matrix) require.NoError(t, err) require.Equal(t, c.expected, res) diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index b818194e86..da64b5dca1 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -86,6 +86,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp } func TestBackfill(t *testing.T) { + t.Parallel() tests := []struct { ToParse string IsOk bool @@ -729,6 +730,7 @@ after_eof 1 2 } for _, test := range tests { t.Run(test.Description, func(t *testing.T) { + t.Parallel() t.Logf("Test:%s", test.Description) outputDir := t.TempDir() diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 698e6641d1..9a07269188 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -60,6 +60,7 @@ func TestMain(m *testing.M) { } func TestQueryRange(t *testing.T) { + t.Parallel() s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`) defer s.Close() @@ -83,6 +84,7 @@ func TestQueryRange(t *testing.T) { } func TestQueryInstant(t *testing.T) { + t.Parallel() s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`) defer s.Close() @@ -114,6 +116,7 @@ func mockServer(code int, body string) (*httptest.Server, func() *http.Request) } func TestCheckSDFile(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -144,6 +147,7 @@ func TestCheckSDFile(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { + t.Parallel() _, err := checkSDFile(test.file) if test.err != "" { require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) @@ -155,6 +159,7 @@ func TestCheckSDFile(t *testing.T) { } func TestCheckDuplicates(t *testing.T) { + t.Parallel() cases := []struct { name string ruleFile string @@ -179,6 +184,7 @@ func TestCheckDuplicates(t *testing.T) { for _, test 
:= range cases { c := test t.Run(c.name, func(t *testing.T) { + t.Parallel() rgs, err := rulefmt.ParseFile(c.ruleFile) require.Empty(t, err) dups := checkDuplicates(rgs.Groups) @@ -198,6 +204,7 @@ func BenchmarkCheckDuplicates(b *testing.B) { } func TestCheckTargetConfig(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -226,6 +233,7 @@ func TestCheckTargetConfig(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { + t.Parallel() _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error()) @@ -237,6 +245,7 @@ func TestCheckTargetConfig(t *testing.T) { } func TestCheckConfigSyntax(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -309,6 +318,7 @@ func TestCheckConfigSyntax(t *testing.T) { } for _, test := range cases { t.Run(test.name, func(t *testing.T) { + t.Parallel() _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly) expectedErrMsg := test.err if strings.Contains(runtime.GOOS, "windows") { @@ -324,6 +334,7 @@ func TestCheckConfigSyntax(t *testing.T) { } func TestAuthorizationConfig(t *testing.T) { + t.Parallel() cases := []struct { name string file string @@ -343,6 +354,7 @@ func TestAuthorizationConfig(t *testing.T) { for _, test := range cases { t.Run(test.name, func(t *testing.T) { + t.Parallel() _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error()) @@ -357,6 +369,7 @@ func TestCheckMetricsExtended(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping on windows") } + t.Parallel() f, err := os.Open("testdata/metrics-test.prom") require.NoError(t, err) @@ -393,6 +406,7 @@ func TestExitCodes(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() for _, c := range []struct { file 
string @@ -417,8 +431,10 @@ func TestExitCodes(t *testing.T) { }, } { t.Run(c.file, func(t *testing.T) { + t.Parallel() for _, lintFatal := range []bool{true, false} { t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) { + t.Parallel() args := []string{"-test.main", "check", "config", "testdata/" + c.file} if lintFatal { args = append(args, "--lint-fatal") @@ -449,6 +465,7 @@ func TestDocumentation(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } + t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -542,16 +559,19 @@ func TestCheckRules(t *testing.T) { func TestCheckRulesWithRuleFiles(t *testing.T) { t.Run("rules-good", func(t *testing.T) { + t.Parallel() exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml") require.Equal(t, successExitCode, exitCode, "") }) t.Run("rules-bad", func(t *testing.T) { + t.Parallel() exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml") require.Equal(t, failureExitCode, exitCode, "") }) t.Run("rules-lint-fatal", func(t *testing.T) { + t.Parallel() exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml") require.Equal(t, lintErrExitCode, exitCode, "") }) @@ -561,6 +581,7 @@ func TestTSDBDumpCommand(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } + t.Parallel() storage := promqltest.LoadedStorage(t, ` load 1m @@ -593,6 +614,7 @@ func TestTSDBDumpCommand(t *testing.T) { }, } { t.Run(c.name, func(t *testing.T) { + t.Parallel() args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()} cmd := exec.Command(promtoolPath, args...) 
require.NoError(t, cmd.Run()) diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index 94e28e570d..e22b8a556b 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -43,6 +43,7 @@ const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Mil // TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together. func TestBackfillRuleIntegration(t *testing.T) { + t.Parallel() const ( testMaxSampleCount = 50 testValue = 123 @@ -72,6 +73,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { + t.Parallel() tmpDir := t.TempDir() ctx := context.Background() @@ -210,6 +212,7 @@ func createMultiRuleTestFiles(path string) error { // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics // received from Prometheus Query API, including the __name__ label. func TestBackfillLabels(t *testing.T) { + t.Parallel() tmpDir := t.TempDir() ctx := context.Background() @@ -251,6 +254,7 @@ func TestBackfillLabels(t *testing.T) { require.NoError(t, err) t.Run("correct-labels", func(t *testing.T) { + t.Parallel() selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) for selectedSeries.Next() { series := selectedSeries.At() diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index 44d8084651..90318ba983 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -29,6 +29,7 @@ import ( ) func TestSDCheckResult(t *testing.T) { + t.Parallel() targetGroups := []*targetgroup.Group{{ Targets: []model.LabelSet{ map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"}, diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index 90192e31ab..e745a3fe7a 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -32,6 +32,7 @@ import ( ) func TestGenerateBucket(t 
*testing.T) { + t.Parallel() tcs := []struct { min, max int start, end, step int diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 9b73dcdc1c..ec34ad3185 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -26,6 +26,7 @@ import ( ) func TestRulesUnitTest(t *testing.T) { + t.Parallel() type args struct { files []string } @@ -141,12 +142,14 @@ func TestRulesUnitTest(t *testing.T) { reuseCount[tt.want] += len(tt.args.files) } t.Run(tt.name, func(t *testing.T) { + t.Parallel() if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) } t.Run("Junit xml output ", func(t *testing.T) { + t.Parallel() var buf bytes.Buffer if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 { t.Errorf("RulesUnitTestResults() = %v, want 1", got) @@ -185,6 +188,7 @@ func TestRulesUnitTest(t *testing.T) { } func TestRulesUnitTestRun(t *testing.T) { + t.Parallel() type args struct { run []string files []string @@ -230,6 +234,7 @@ func TestRulesUnitTestRun(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + t.Parallel() got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...) require.Equal(t, tt.want, got) }) From bc1bc5c1187594980de88bcf2418720955a209ca Mon Sep 17 00:00:00 2001 From: SuperQ Date: Wed, 6 Nov 2024 11:46:17 +0100 Subject: [PATCH 0903/1397] Update sigv4 library Migrate use of prometheus/common/sigv4 to prometheus/sigv4. 
Related to: https://github.com/prometheus/common/issues/709 Signed-off-by: SuperQ --- config/config.go | 2 +- go.mod | 2 +- go.sum | 269 +-------------------------------------- notifier/notifier.go | 2 +- storage/remote/client.go | 2 +- 5 files changed, 6 insertions(+), 271 deletions(-) diff --git a/config/config.go b/config/config.go index ef3ea5e67e..b23fff5756 100644 --- a/config/config.go +++ b/config/config.go @@ -30,7 +30,7 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/common/sigv4" + "github.com/prometheus/sigv4" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" diff --git a/go.mod b/go.mod index 5f138895d1..1f3413372a 100644 --- a/go.mod +++ b/go.mod @@ -54,8 +54,8 @@ require ( github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.60.1 github.com/prometheus/common/assets v0.2.0 - github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.13.1 + github.com/prometheus/sigv4 v0.1.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index fa4367d15a..334f59b733 100644 --- a/go.sum +++ b/go.sum @@ -1,41 +1,11 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= 
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= 
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= @@ -59,7 +29,6 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mo github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -73,7 +42,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -85,7 +53,6 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= @@ -101,9 +68,6 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex 
v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= @@ -168,15 +132,10 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -218,38 +177,26 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -258,13 +205,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -273,18 +216,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -292,8 +225,6 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= @@ -346,7 +277,6 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b 
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -359,7 +289,6 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4= github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= @@ -376,12 +305,8 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go 
v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -394,7 +319,6 @@ github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -504,8 +428,6 @@ github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwv github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= 
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -516,28 +438,22 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= -github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= -github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04= github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/sigv4 v0.1.0 h1:FgxH+m1qf9dGQ4w8Dd6VkthmpFQfGTzUeavMoQeG1LA= +github.com/prometheus/sigv4 v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU= github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -551,7 +467,6 @@ github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+Yg github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -581,18 +496,11 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg= @@ -629,8 +537,6 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -641,35 +547,11 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -683,32 +565,15 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -718,23 +583,15 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -746,43 +603,20 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -809,9 +643,7 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -822,9 +654,6 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= @@ -832,43 +661,11 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -878,71 +675,22 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= 
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= @@ -955,18 +703,14 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -979,7 +723,6 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -989,12 +732,7 @@ gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 
h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= @@ -1009,9 +747,6 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 
h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/notifier/notifier.go b/notifier/notifier.go index e970b67e6d..09a2005a36 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -34,8 +34,8 @@ import ( config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" - "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" + "github.com/prometheus/sigv4" "go.uber.org/atomic" "gopkg.in/yaml.v2" diff --git a/storage/remote/client.go b/storage/remote/client.go index 23775122e5..98eb8d3d77 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -30,8 +30,8 @@ import ( "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" + "github.com/prometheus/sigv4" "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" From f9057544cb69261acd59628c2e81369adeb584c9 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sun, 10 Nov 2024 09:17:21 -0800 Subject: [PATCH 0904/1397] Fix AllPostings added twice (#13893) * handle all postings added twice --------- Signed-off-by: Ben Ye --- tsdb/querier.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index b80faf881e..f741c5e281 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -193,6 +193,11 @@ func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.S // PostingsForMatchers assembles a single postings iterator against the index reader // based on the given matchers. The resulting postings are not ordered by series. 
func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) { + if len(ms) == 1 && ms[0].Name == "" && ms[0].Value == "" { + k, v := index.AllPostingsKey() + return ix.Postings(ctx, k, v) + } + var its, notIts []index.Postings // See which label must be non-empty. // Optimization for case like {l=~".", l!="1"}. @@ -247,13 +252,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc return nil, ctx.Err() } switch { - case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. - k, v := index.AllPostingsKey() - allPostings, err := ix.Postings(ctx, k, v) - if err != nil { - return nil, err - } - its = append(its, allPostings) + case m.Name == "" && m.Value == "": + // We already handled the case at the top of the function, + // and it is unexpected to get all postings again here. + return nil, errors.New("unexpected all postings") case m.Type == labels.MatchRegexp && m.Value == ".*": // .* regexp matches any string: do nothing. 
case m.Type == labels.MatchNotRegexp && m.Value == ".*": From 140f4aa9aed6674c582dc603431a4aaa0c702cb7 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sun, 10 Nov 2024 22:59:24 -0800 Subject: [PATCH 0905/1397] feat: Allow customizing TSDB postings decoder (#13567) * allow customizing TSDB postings decoder --------- Signed-off-by: Ben Ye --- cmd/promtool/tsdb.go | 2 +- tsdb/block.go | 8 ++++++-- tsdb/block_test.go | 26 +++++++++++++------------- tsdb/blockwriter_test.go | 2 +- tsdb/compact.go | 13 ++++++++++++- tsdb/compact_test.go | 6 +++--- tsdb/db.go | 18 ++++++++++++------ tsdb/db_test.go | 12 ++++++------ tsdb/index/index.go | 35 ++++++++++++++++------------------- tsdb/index/index_test.go | 15 ++++++++------- tsdb/querier_bench_test.go | 4 ++-- tsdb/querier_test.go | 10 +++++----- tsdb/repair_test.go | 4 ++-- 13 files changed, 87 insertions(+), 68 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 847ea6be0c..8a5a728875 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -405,7 +405,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) } } - b, err := db.Block(blockID) + b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory) if err != nil { return nil, nil, err } diff --git a/tsdb/block.go b/tsdb/block.go index 48ba4588aa..aec99788c9 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -330,7 +330,7 @@ type Block struct { // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // to instantiate chunk structs. 
-func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { +func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool, postingsDecoderFactory PostingsDecoderFactory) (pb *Block, err error) { if logger == nil { logger = promslog.NewNopLogger() } @@ -351,7 +351,11 @@ func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, } closers = append(closers, cr) - ir, err := index.NewFileReader(filepath.Join(dir, indexFilename)) + decoder := index.DecodePostingsRaw + if postingsDecoderFactory != nil { + decoder = postingsDecoderFactory(meta) + } + ir, err := index.NewFileReader(filepath.Join(dir, indexFilename), decoder) if err != nil { return nil, err } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 3589b42c17..3e25cff3da 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -59,14 +59,14 @@ func TestSetCompactionFailed(t *testing.T) { tmpdir := t.TempDir() blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1)) - b, err := OpenBlock(nil, blockDir, nil) + b, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) require.False(t, b.meta.Compaction.Failed) require.NoError(t, b.setCompactionFailed()) require.True(t, b.meta.Compaction.Failed) require.NoError(t, b.Close()) - b, err = OpenBlock(nil, blockDir, nil) + b, err = OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) require.True(t, b.meta.Compaction.Failed) require.NoError(t, b.Close()) @@ -74,7 +74,7 @@ func TestSetCompactionFailed(t *testing.T) { func TestCreateBlock(t *testing.T) { tmpdir := t.TempDir() - b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil) + b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil, nil) require.NoError(t, err) require.NoError(t, b.Close()) } @@ -84,7 +84,7 @@ func BenchmarkOpenBlock(b *testing.B) { blockDir := createBlock(b, tmpdir, genSeries(1e6, 20, 0, 10)) b.Run("benchmark", func(b *testing.B) { for i := 0; i < b.N; i++ { - block, 
err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(b, err) require.NoError(b, block.Close()) } @@ -190,7 +190,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, f.Close()) // Check open err. - b, err := OpenBlock(nil, blockDir, nil) + b, err := OpenBlock(nil, blockDir, nil, nil) if tc.openErr != nil { require.EqualError(t, err, tc.openErr.Error()) return @@ -245,7 +245,7 @@ func TestLabelValuesWithMatchers(t *testing.T) { require.NotEmpty(t, files, "No chunk created.") // Check open err. - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) defer func() { require.NoError(t, block.Close()) }() @@ -325,7 +325,7 @@ func TestBlockQuerierReturnsSortedLabelValues(t *testing.T) { blockDir := createBlock(t, tmpdir, seriesEntries) // Check open err. - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, block.Close()) }) @@ -352,7 +352,7 @@ func TestBlockSize(t *testing.T) { // Create a block and compare the reported size vs actual disk size. 
{ blockDirInit = createBlock(t, tmpdir, genSeries(10, 1, 1, 100)) - blockInit, err = OpenBlock(nil, blockDirInit, nil) + blockInit, err = OpenBlock(nil, blockDirInit, nil, nil) require.NoError(t, err) defer func() { require.NoError(t, blockInit.Close()) @@ -377,7 +377,7 @@ func TestBlockSize(t *testing.T) { blockDirsAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) require.NoError(t, err) require.Len(t, blockDirsAfterCompact, 1) - blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirsAfterCompact[0].String()), nil) + blockAfterCompact, err := OpenBlock(nil, filepath.Join(tmpdir, blockDirsAfterCompact[0].String()), nil, nil) require.NoError(t, err) defer func() { require.NoError(t, blockAfterCompact.Close()) @@ -408,7 +408,7 @@ func TestReadIndexFormatV1(t *testing.T) { */ blockDir := filepath.Join("testdata", "index_format_v1") - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) q, err := NewBlockQuerier(block, 0, 1000) @@ -445,7 +445,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { require.NotEmpty(b, files, "No chunk created.") // Check open err. - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(b, err) defer func() { require.NoError(b, block.Close()) }() @@ -497,7 +497,7 @@ func TestLabelNamesWithMatchers(t *testing.T) { require.NotEmpty(t, files, "No chunk created.") // Check open err. 
- block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, block.Close()) }) @@ -551,7 +551,7 @@ func TestBlockIndexReader_PostingsForLabelMatching(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, files, "No chunk created.") - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, block.Close()) }) diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go index 4ec25df70a..2704b53566 100644 --- a/tsdb/blockwriter_test.go +++ b/tsdb/blockwriter_test.go @@ -47,7 +47,7 @@ func TestBlockWriter(t *testing.T) { // Confirm the block has the correct data. blockpath := filepath.Join(outputDir, id.String()) - b, err := OpenBlock(nil, blockpath, nil) + b, err := OpenBlock(nil, blockpath, nil, nil) require.NoError(t, err) defer func() { require.NoError(t, b.Close()) }() q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64) diff --git a/tsdb/compact.go b/tsdb/compact.go index 17374531d4..74fde4c783 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -87,6 +87,7 @@ type LeveledCompactor struct { maxBlockChunkSegmentSize int64 mergeFunc storage.VerticalChunkSeriesMergeFunc postingsEncoder index.PostingsEncoder + postingsDecoderFactory PostingsDecoderFactory enableOverlappingCompaction bool } @@ -158,6 +159,9 @@ type LeveledCompactorOptions struct { // PE specifies the postings encoder. It is called when compactor is writing out the postings for a label name/value pair during compaction. // If it is nil then the default encoder is used. At the moment that is the "raw" encoder. See index.EncodePostingsRaw for more. PE index.PostingsEncoder + // PD specifies the postings decoder factory to return different postings decoder based on BlockMeta. It is called when opening a block or opening the index file. 
+ // If it is nil then a default decoder is used, compatible with Prometheus v2. + PD PostingsDecoderFactory // MaxBlockChunkSegmentSize is the max block chunk segment size. If it is 0 then the default chunks.DefaultChunkSegmentSize is used. MaxBlockChunkSegmentSize int64 // MergeFunc is used for merging series together in vertical compaction. By default storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge) is used. @@ -167,6 +171,12 @@ type LeveledCompactorOptions struct { EnableOverlappingCompaction bool } +type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder + +func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder { + return index.DecodePostingsRaw +} + func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, @@ -213,6 +223,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer maxBlockChunkSegmentSize: maxBlockChunkSegmentSize, mergeFunc: mergeFunc, postingsEncoder: pe, + postingsDecoderFactory: opts.PD, enableOverlappingCompaction: opts.EnableOverlappingCompaction, }, nil } @@ -477,7 +488,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, if b == nil { var err error - b, err = OpenBlock(c.logger, d, c.chunkPool) + b, err = OpenBlock(c.logger, d, c.chunkPool, c.postingsDecoderFactory) if err != nil { return nil, err } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 5123d6e624..310fe8f25a 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1153,7 +1153,7 @@ func BenchmarkCompaction(b *testing.B) { blockDirs := make([]string, 0, len(c.ranges)) var blocks []*Block for _, r := range c.ranges { - block, 
err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil) + block, err := OpenBlock(nil, createBlock(b, dir, genSeries(nSeries, 10, r[0], r[1])), nil, nil) require.NoError(b, err) blocks = append(blocks, block) defer func() { @@ -1549,7 +1549,7 @@ func TestHeadCompactionWithHistograms(t *testing.T) { require.Len(t, ids, 1) // Open the block and query it and check the histograms. - block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, ids[0].String()), nil) + block, err := OpenBlock(nil, path.Join(head.opts.ChunkDirRoot, ids[0].String()), nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, block.Close()) @@ -1911,7 +1911,7 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) { ctx := context.Background() tmpdir := t.TempDir() blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 10)) - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) // Write tombstone covering the whole block. err = block.Delete(ctx, 0, 10, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "0")) diff --git a/tsdb/db.go b/tsdb/db.go index ab919c3100..2cfa4f3638 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -91,6 +91,7 @@ func DefaultOptions() *Options { EnableDelayedCompaction: false, CompactionDelayMaxPercent: DefaultCompactionDelayMaxPercent, CompactionDelay: time.Duration(0), + PostingsDecoderFactory: DefaultPostingsDecoderFactory, } } @@ -219,6 +220,10 @@ type Options struct { // BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader. BlockChunkQuerierFunc BlockChunkQuerierFunc + + // PostingsDecoderFactory allows users to customize postings decoders based on BlockMeta. + // By default, DefaultPostingsDecoderFactory will be used to create raw posting decoder. 
+ PostingsDecoderFactory PostingsDecoderFactory } type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) @@ -633,7 +638,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { return nil, ErrClosed default: } - loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil) + loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil, DefaultPostingsDecoderFactory) if err != nil { return nil, err } @@ -731,7 +736,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) { } // Block returns a block reader by given block id. -func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { +func (db *DBReadOnly) Block(blockID string, postingsDecoderFactory PostingsDecoderFactory) (BlockReader, error) { select { case <-db.closed: return nil, ErrClosed @@ -743,7 +748,7 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { return nil, fmt.Errorf("invalid block ID %s", blockID) } - block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil) + block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil, postingsDecoderFactory) if err != nil { return nil, err } @@ -902,6 +907,7 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{ MaxBlockChunkSegmentSize: opts.MaxBlockChunkSegmentSize, EnableOverlappingCompaction: opts.EnableOverlappingCompaction, + PD: opts.PostingsDecoderFactory, }) } if err != nil { @@ -1568,7 +1574,7 @@ func (db *DB) reloadBlocks() (err error) { db.mtx.Lock() defer db.mtx.Unlock() - loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool) + loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool, db.opts.PostingsDecoderFactory) if err != nil { return err } @@ -1663,7 +1669,7 @@ func (db *DB) reloadBlocks() 
(err error) { return nil } -func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { +func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, postingsDecoderFactory PostingsDecoderFactory) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { return nil, nil, fmt.Errorf("find blocks: %w", err) @@ -1680,7 +1686,7 @@ func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc. // See if we already have the block in memory or open it otherwise. block, open := getBlock(loaded, meta.ULID) if !open { - block, err = OpenBlock(l, bDir, chunkPool) + block, err = OpenBlock(l, bDir, chunkPool, postingsDecoderFactory) if err != nil { corrupted[meta.ULID] = err continue diff --git a/tsdb/db_test.go b/tsdb/db_test.go index bfdf7aa4ad..f851f2b911 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1320,7 +1320,7 @@ func TestTombstoneCleanFail(t *testing.T) { totalBlocks := 2 for i := 0; i < totalBlocks; i++ { blockDir := createBlock(t, db.Dir(), genSeries(1, 1, int64(i), int64(i)+1)) - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) // Add some fake tombstones to trigger the compaction. tomb := tombstones.NewMemTombstones() @@ -1375,7 +1375,7 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) { // Generate some blocks with old mint (near epoch). for j := 0; j < totalBlocks; j++ { blockDir := createBlock(t, dbDir, genSeries(10, 1, int64(j), int64(j)+1)) - block, err := OpenBlock(nil, blockDir, nil) + block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) // Cover block with tombstones so it can be deleted with CleanTombstones() as well. 
tomb := tombstones.NewMemTombstones() @@ -1436,7 +1436,7 @@ func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ * return []ulid.ULID{}, errors.New("the compactor already did the maximum allowed blocks so it is time to fail") } - block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil) + block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil, nil) require.NoError(c.t, err) require.NoError(c.t, block.Close()) // Close block as we won't be using anywhere. c.blocks = append(c.blocks, block) @@ -2509,13 +2509,13 @@ func TestDBReadOnly(t *testing.T) { }) t.Run("block", func(t *testing.T) { blockID := expBlock.meta.ULID.String() - block, err := dbReadOnly.Block(blockID) + block, err := dbReadOnly.Block(blockID, nil) require.NoError(t, err) require.Equal(t, expBlock.Meta(), block.Meta(), "block meta mismatch") }) t.Run("invalid block ID", func(t *testing.T) { blockID := "01GTDVZZF52NSWB5SXQF0P2PGF" - _, err := dbReadOnly.Block(blockID) + _, err := dbReadOnly.Block(blockID, nil) require.Error(t, err) }) t.Run("last block ID", func(t *testing.T) { @@ -8851,7 +8851,7 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) { // Include blockID into series to identify which block got touched. serieses := []storage.Series{storage.NewListSeries(labels.FromMap(map[string]string{"block": fmt.Sprintf("block-%d", i), labels.MetricName: "test_metric"}), []chunks.Sample{sample{t: 0, f: 1}})} blockDir := createBlock(t, db.Dir(), serieses) - b, err := OpenBlock(db.logger, blockDir, db.chunkPool) + b, err := OpenBlock(db.logger, blockDir, db.chunkPool, nil) require.NoError(t, err) // Overwrite meta.json with compaction section for testing purpose. 
diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 8c0f698eae..4e199c5bd2 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -118,6 +118,8 @@ type symbolCacheEntry struct { type PostingsEncoder func(*encoding.Encbuf, []uint32) error +type PostingsDecoder func(encoding.Decbuf) (int, Postings, error) + // Writer implements the IndexWriter interface for the standard // serialization format. type Writer struct { @@ -1157,17 +1159,17 @@ func (b realByteSlice) Sub(start, end int) ByteSlice { // NewReader returns a new index reader on the given byte slice. It automatically // handles different format versions. -func NewReader(b ByteSlice) (*Reader, error) { - return newReader(b, io.NopCloser(nil)) +func NewReader(b ByteSlice, decoder PostingsDecoder) (*Reader, error) { + return newReader(b, io.NopCloser(nil), decoder) } // NewFileReader returns a new index reader against the given index file. -func NewFileReader(path string) (*Reader, error) { +func NewFileReader(path string, decoder PostingsDecoder) (*Reader, error) { f, err := fileutil.OpenMmapFile(path) if err != nil { return nil, err } - r, err := newReader(realByteSlice(f.Bytes()), f) + r, err := newReader(realByteSlice(f.Bytes()), f, decoder) if err != nil { return nil, tsdb_errors.NewMulti( err, @@ -1178,7 +1180,7 @@ func NewFileReader(path string) (*Reader, error) { return r, nil } -func newReader(b ByteSlice, c io.Closer) (*Reader, error) { +func newReader(b ByteSlice, c io.Closer, postingsDecoder PostingsDecoder) (*Reader, error) { r := &Reader{ b: b, c: c, @@ -1277,7 +1279,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { r.nameSymbols[off] = k } - r.dec = &Decoder{LookupSymbol: r.lookupSymbol} + r.dec = &Decoder{LookupSymbol: r.lookupSymbol, DecodePostings: postingsDecoder} return r, nil } @@ -1706,7 +1708,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P } // Read from the postings table. 
d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) - _, p, err := r.dec.Postings(d.Get()) + _, p, err := r.dec.DecodePostings(d) if err != nil { return nil, fmt.Errorf("decode postings: %w", err) } @@ -1749,7 +1751,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P if val == value { // Read from the postings table. d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) - _, p, err := r.dec.Postings(d2.Get()) + _, p, err := r.dec.DecodePostings(d2) if err != nil { return false, fmt.Errorf("decode postings: %w", err) } @@ -1790,7 +1792,7 @@ func (r *Reader) PostingsForLabelMatching(ctx context.Context, name string, matc if match(val) { // We want this postings iterator since the value is a match postingsDec := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) - _, p, err := r.dec.PostingsFromDecbuf(postingsDec) + _, p, err := r.dec.DecodePostings(postingsDec) if err != nil { return false, fmt.Errorf("decode postings: %w", err) } @@ -1823,7 +1825,7 @@ func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, ma // Read from the postings table. d := encoding.NewDecbufAt(r.b, int(offset), castagnoliTable) - _, p, err := r.dec.PostingsFromDecbuf(d) + _, p, err := r.dec.DecodePostings(d) if err != nil { return ErrPostings(fmt.Errorf("decode postings: %w", err)) } @@ -1918,17 +1920,12 @@ func (s stringListIter) Err() error { return nil } // It currently does not contain decoding methods for all entry types but can be extended // by them if there's demand. type Decoder struct { - LookupSymbol func(context.Context, uint32) (string, error) + LookupSymbol func(context.Context, uint32) (string, error) + DecodePostings PostingsDecoder } -// Postings returns a postings list for b and its number of elements. 
-func (dec *Decoder) Postings(b []byte) (int, Postings, error) { - d := encoding.Decbuf{B: b} - return dec.PostingsFromDecbuf(d) -} - -// PostingsFromDecbuf returns a postings list for d and its number of elements. -func (dec *Decoder) PostingsFromDecbuf(d encoding.Decbuf) (int, Postings, error) { +// DecodePostingsRaw returns a postings list for d and its number of elements. +func DecodePostingsRaw(d encoding.Decbuf) (int, Postings, error) { n := d.Be32int() l := d.Get() if d.Err() != nil { diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index d81dd8696c..0d330cfcb9 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -146,7 +146,7 @@ func TestIndexRW_Create_Open(t *testing.T) { require.NoError(t, err) require.NoError(t, iw.Close()) - ir, err := NewFileReader(fn) + ir, err := NewFileReader(fn, DecodePostingsRaw) require.NoError(t, err) require.NoError(t, ir.Close()) @@ -157,7 +157,7 @@ func TestIndexRW_Create_Open(t *testing.T) { require.NoError(t, err) f.Close() - _, err = NewFileReader(dir) + _, err = NewFileReader(dir, DecodePostingsRaw) require.Error(t, err) } @@ -218,7 +218,7 @@ func TestIndexRW_Postings(t *testing.T) { }, labelIndices) t.Run("ShardedPostings()", func(t *testing.T) { - ir, err := NewFileReader(fn) + ir, err := NewFileReader(fn, DecodePostingsRaw) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, ir.Close()) @@ -469,7 +469,7 @@ func TestDecbufUvarintWithInvalidBuffer(t *testing.T) { func TestReaderWithInvalidBuffer(t *testing.T) { b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) - _, err := NewReader(b) + _, err := NewReader(b, DecodePostingsRaw) require.Error(t, err) } @@ -481,7 +481,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) { err := os.WriteFile(idxName, []byte("corrupted contents"), 0o666) require.NoError(t, err) - _, err = NewFileReader(idxName) + _, err = NewFileReader(idxName, DecodePostingsRaw) require.Error(t, err) // dir.Close will fail on Win if idxName fd 
is not closed on error path. @@ -560,7 +560,8 @@ func BenchmarkReader_ShardedPostings(b *testing.B) { } func TestDecoder_Postings_WrongInput(t *testing.T) { - _, _, err := (&Decoder{}).Postings([]byte("the cake is a lie")) + d := encoding.Decbuf{B: []byte("the cake is a lie")} + _, _, err := (&Decoder{DecodePostings: DecodePostingsRaw}).DecodePostings(d) require.Error(t, err) } @@ -690,7 +691,7 @@ func createFileReader(ctx context.Context, tb testing.TB, input indexWriterSerie } require.NoError(tb, iw.Close()) - ir, err := NewFileReader(fn) + ir, err := NewFileReader(fn, DecodePostingsRaw) require.NoError(tb, err) tb.Cleanup(func() { require.NoError(tb, ir.Close()) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 33dca1284d..8b8f092a81 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -75,7 +75,7 @@ func BenchmarkQuerier(b *testing.B) { b.Run("Block", func(b *testing.B) { blockdir := createBlockFromHead(b, b.TempDir(), h) - block, err := OpenBlock(nil, blockdir, nil) + block, err := OpenBlock(nil, blockdir, nil, nil) require.NoError(b, err) defer func() { require.NoError(b, block.Close()) @@ -315,7 +315,7 @@ func BenchmarkQuerierSelect(b *testing.B) { tmpdir := b.TempDir() blockdir := createBlockFromHead(b, tmpdir, h) - block, err := OpenBlock(nil, blockdir, nil) + block, err := OpenBlock(nil, blockdir, nil, nil) require.NoError(b, err) defer func() { require.NoError(b, block.Close()) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 2d66102bfe..e97aea2fde 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2443,7 +2443,7 @@ func BenchmarkQueryIterator(b *testing.B) { } else { generatedSeries = populateSeries(prefilledLabels, mint, maxt) } - block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil) + block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil) require.NoError(b, err) blocks = append(blocks, block) defer block.Close() @@ -2506,7 +2506,7 @@ 
func BenchmarkQuerySeek(b *testing.B) { } else { generatedSeries = populateSeries(prefilledLabels, mint, maxt) } - block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil) + block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil) require.NoError(b, err) blocks = append(blocks, block) defer block.Close() @@ -2641,7 +2641,7 @@ func BenchmarkSetMatcher(b *testing.B) { } else { generatedSeries = populateSeries(prefilledLabels, mint, maxt) } - block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil) + block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil) require.NoError(b, err) blocks = append(blocks, block) defer block.Close() @@ -3209,7 +3209,7 @@ func BenchmarkQueries(b *testing.B) { qs := make([]storage.Querier, 0, 10) for x := 0; x <= 10; x++ { - block, err := OpenBlock(nil, createBlock(b, dir, series), nil) + block, err := OpenBlock(nil, createBlock(b, dir, series), nil, nil) require.NoError(b, err) q, err := NewBlockQuerier(block, 1, nSamples) require.NoError(b, err) @@ -3792,7 +3792,7 @@ func (m mockReaderOfLabels) Symbols() index.StringIter { // https://github.com/prometheus/prometheus/issues/14723, when one of the queriers (blockQuerier in this case) // alters the passed matchers. func TestMergeQuerierConcurrentSelectMatchers(t *testing.T) { - block, err := OpenBlock(nil, createBlock(t, t.TempDir(), genSeries(1, 1, 0, 1)), nil) + block, err := OpenBlock(nil, createBlock(t, t.TempDir(), genSeries(1, 1, 0, 1)), nil, nil) require.NoError(t, err) defer func() { require.NoError(t, block.Close()) diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go index 8a70e05f31..8a192c4f78 100644 --- a/tsdb/repair_test.go +++ b/tsdb/repair_test.go @@ -79,7 +79,7 @@ func TestRepairBadIndexVersion(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.Join(tmpDbDir, "chunks"), 0o777)) // Read current index to check integrity. 
- r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) + r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), index.DecodePostingsRaw) require.NoError(t, err) p, err := r.Postings(ctx, "b", "1") require.NoError(t, err) @@ -97,7 +97,7 @@ func TestRepairBadIndexVersion(t *testing.T) { require.NoError(t, err) db.Close() - r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) + r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename), index.DecodePostingsRaw) require.NoError(t, err) defer r.Close() p, err = r.Postings(ctx, "b", "1") From de35a40ed5d555700eb9356a693e49f9eee24943 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 11 Nov 2024 10:55:05 +0000 Subject: [PATCH 0906/1397] [REFACTOR] Small improvement to top-level naming Previously, top-level initialization (including WAL reading) had this at the top of the stack: github.com/oklog/run.(*Group).Run.func1:38 I found this confusing, and thought it had something to do with logging. Introducing another local function should make this clearer. Also make the error message user-centric not code-centric. Signed-off-by: Bryan Boreham --- cmd/prometheus/main.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index ecf179ce59..493106e155 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1369,10 +1369,12 @@ func main() { }, ) } - if err := g.Run(); err != nil { - logger.Error("Error running goroutines from run.Group", "err", err) - os.Exit(1) - } + func() { // This function exists so the top of the stack is named 'main.main.funcxxx' and not 'oklog'. 
+ if err := g.Run(); err != nil { + logger.Error("Fatal error", "err", err) + os.Exit(1) + } + }() logger.Info("See you next time!") } From fb69a38e671db03edff09e4d90c0f228251d67bb Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Mon, 11 Nov 2024 17:29:33 +0100 Subject: [PATCH 0907/1397] Enable auto-gomemlimit by default (#15373) * Enable auto-gomemlimit by default Enable the `auto-gomemlimit` feature flag by default. * Add command line flag `--no-auto-gomemlimit` to disable. --------- Signed-off-by: SuperQ --- cmd/prometheus/main.go | 16 ++++++++-------- docs/command-line/prometheus.md | 3 ++- docs/feature_flags.md | 8 -------- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8fb6d4d38e..8b8f42d18e 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -196,13 +196,14 @@ type flagConfig struct { enableAutoReload bool autoReloadInterval model.Duration - featureList []string - memlimitRatio float64 + memlimitEnable bool + memlimitRatio float64 + + featureList []string // These options are extracted from featureList // for ease of use. enablePerStepStats bool enableAutoGOMAXPROCS bool - enableAutoGOMEMLIMIT bool enableConcurrentRuleEval bool prometheusURL string @@ -243,9 +244,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { c.autoReloadInterval, _ = model.ParseDuration("1s") } logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval) - case "auto-gomemlimit": - c.enableAutoGOMEMLIMIT = true - logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit") case "concurrent-rule-eval": c.enableConcurrentRuleEval = true logger.Info("Experimental concurrent rule evaluation enabled.") @@ -332,6 +330,8 @@ func main() { a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). 
Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) + a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit"). + Default("true").BoolVar(&cfg.memlimitEnable) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). Default("0.9").FloatVar(&cfg.memlimitRatio) @@ -515,7 +515,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). 
Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) @@ -775,7 +775,7 @@ func main() { } } - if cfg.enableAutoGOMEMLIMIT { + if cfg.memlimitEnable { if _, err := memlimit.SetGoMemLimitWithOpts( memlimit.WithRatio(cfg.memlimitRatio), memlimit.WithProvider( diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index a179a2f9f1..9f5400c414 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -17,6 +17,7 @@ The Prometheus monitoring server | --config.file | Prometheus configuration file path. | `prometheus.yml` | | --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | | --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | +| --auto-gomemlimit | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | @@ -58,7 +59,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 4be11ed472..0541961f26 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -53,14 +53,6 @@ computed at all. When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. -## Auto GOMEMLIMIT - -`--enable-feature=auto-gomemlimit` - -When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used. - -There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. 
The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. - ## Native Histograms `--enable-feature=native-histograms` From f701f4e23612033b1bd2a009e786b0aada798295 Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Mon, 11 Nov 2024 18:24:59 +0100 Subject: [PATCH 0908/1397] Enable auto-gomaxprocs by default (#15376) Enable the `auto-gomaxprocs` feature flag by default. * Add command line flag `--no-auto-gomaxprocs` to disable. Signed-off-by: SuperQ --- cmd/prometheus/main.go | 9 ++++----- docs/command-line/prometheus.md | 1 + docs/feature_flags.md | 6 ------ 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8b8f42d18e..3d999b1d0a 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -196,6 +196,7 @@ type flagConfig struct { enableAutoReload bool autoReloadInterval model.Duration + maxprocsEnable bool memlimitEnable bool memlimitRatio float64 @@ -203,7 +204,6 @@ type flagConfig struct { // These options are extracted from featureList // for ease of use. enablePerStepStats bool - enableAutoGOMAXPROCS bool enableConcurrentRuleEval bool prometheusURL string @@ -235,9 +235,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { case "promql-per-step-stats": c.enablePerStepStats = true logger.Info("Experimental per-step statistics reporting") - case "auto-gomaxprocs": - c.enableAutoGOMAXPROCS = true - logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota") case "auto-reload-config": c.enableAutoReload = true if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 { @@ -330,6 +327,8 @@ func main() { a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) + a.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match Linux container CPU quota"). 
+ Default("true").BoolVar(&cfg.maxprocsEnable) a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit"). Default("true").BoolVar(&cfg.memlimitEnable) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). @@ -766,7 +765,7 @@ func main() { ruleManager *rules.Manager ) - if cfg.enableAutoGOMAXPROCS { + if cfg.maxprocsEnable { l := func(format string, a ...interface{}) { logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 9f5400c414..dd207dc382 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -17,6 +17,7 @@ The Prometheus monitoring server | --config.file | Prometheus configuration file path. | `prometheus.yml` | | --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | | --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | +| --auto-gomaxprocs | Automatically set GOMAXPROCS to match Linux container CPU quota | `true` | | --auto-gomemlimit | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 0541961f26..8c0e319f9c 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -47,12 +47,6 @@ statistics. Currently this is limited to totalQueryableSamples. When disabled in either the engine or the query, per-step statistics are not computed at all. 
-## Auto GOMAXPROCS - -`--enable-feature=auto-gomaxprocs` - -When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. - ## Native Histograms `--enable-feature=native-histograms` From 0f0deb77a2f15383914ad60a3309281adb7e8b27 Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Mon, 11 Nov 2024 18:26:38 +0100 Subject: [PATCH 0909/1397] Enable auto-gomemlimit by default (#15372) Enable the `auto-gomemlimit` feature flag by default. * Add command line flag `--no-auto-gomemlimit` to disable. Signed-off-by: SuperQ --- cmd/prometheus/main.go | 16 ++++++++-------- docs/command-line/prometheus.md | 3 ++- docs/feature_flags.md | 8 -------- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index ecf179ce59..974cd7edb2 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -195,13 +195,14 @@ type flagConfig struct { enableAutoReload bool autoReloadInterval model.Duration - featureList []string - memlimitRatio float64 + memlimitEnable bool + memlimitRatio float64 + + featureList []string // These options are extracted from featureList // for ease of use. enablePerStepStats bool enableAutoGOMAXPROCS bool - enableAutoGOMEMLIMIT bool enableConcurrentRuleEval bool prometheusURL string @@ -242,9 +243,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { c.autoReloadInterval, _ = model.ParseDuration("1s") } logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval) - case "auto-gomemlimit": - c.enableAutoGOMEMLIMIT = true - logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit") case "concurrent-rule-eval": c.enableConcurrentRuleEval = true logger.Info("Experimental concurrent rule evaluation enabled.") @@ -331,6 +329,8 @@ func main() { a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). 
Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) + a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit"). + Default("true").BoolVar(&cfg.memlimitEnable) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). Default("0.9").FloatVar(&cfg.memlimitRatio) @@ -514,7 +514,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). 
Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) @@ -765,7 +765,7 @@ func main() { } } - if cfg.enableAutoGOMEMLIMIT { + if cfg.memlimitEnable { if _, err := memlimit.SetGoMemLimitWithOpts( memlimit.WithRatio(cfg.memlimitRatio), memlimit.WithProvider( diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index a179a2f9f1..9f5400c414 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -17,6 +17,7 @@ The Prometheus monitoring server | --config.file | Prometheus configuration file path. | `prometheus.yml` | | --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | | --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | +| --auto-gomemlimit | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | @@ -58,7 +59,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. 
Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 4be11ed472..0541961f26 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -53,14 +53,6 @@ computed at all. When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. -## Auto GOMEMLIMIT - -`--enable-feature=auto-gomemlimit` - -When enabled, the GOMEMLIMIT variable is automatically set to match the Linux container memory limit. If there is no container limit, or the process is running outside of containers, the system memory total is used. - -There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. 
The default is `0.9`, which means 90% of the memory limit will be used for Prometheus. - ## Native Histograms `--enable-feature=native-histograms` From ad8138a65d3fcbf860e48b03edbf38ed4c066a03 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 11 Nov 2024 16:36:58 +0000 Subject: [PATCH 0910/1397] Add newlines before code blocks Signed-off-by: Fiona Liao --- docs/migration.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/migration.md b/docs/migration.md index 8a78ee63ab..c3d16472b7 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -143,6 +143,7 @@ scrape_configs: ### Log message format Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This results in a change of log message format. An example of the old log format is: + ``` ts=2024-10-23T22:01:06.074Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d ts=2024-10-23T22:01:06.074Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=91d80252c3e528728b0f88d254dd720f6be07cb8-modified)" @@ -151,6 +152,7 @@ ts=2024-10-23T22:01:06.074Z caller=main.go:677 level=info host_details="(Linux 5 ``` a similar sequence in the new log format looks like this: + ``` time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:640 msg="No time or size retention was set so using the default time retention" duration=15d time=2024-10-24T00:03:07.542+02:00 level=INFO source=/home/user/go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:681 msg="Starting Prometheus Server" mode=server version="(version=, branch=, revision=7c7116fea8343795cae6da42960cacd0207a2af8)" From 962a3cd6bec885a96de5efe65ea2cfca53b54f80 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 11 Nov 2024 16:39:46 +0000 Subject: [PATCH 0911/1397] Add backticks to fallback_scrape_protocol Signed-off-by: Fiona Liao --- 
docs/migration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/migration.md b/docs/migration.md index c3d16472b7..f01555675f 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -95,7 +95,7 @@ header was unparsable or unrecognised. This could lead to incorrect data being parsed in the scrape. Prometheus v3 will now fail the scrape in such cases. If a scrape target is not providing the correct Content-Type header the -fallback protocol can be specified using the fallback_scrape_protocol +fallback protocol can be specified using the `fallback_scrape_protocol` parameter. See [Prometheus scrape_config documentation.](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) This is a breaking change as scrapes that may have succeeded with Prometheus v2 From b2fd5b7e45df6264b535fcdcb5fe7ee9ad094022 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 11 Nov 2024 16:45:13 +0000 Subject: [PATCH 0912/1397] Format PromQL section, replace table Signed-off-by: Fiona Liao --- docs/migration.md | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index f01555675f..4d40a54814 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -54,17 +54,13 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 - The `.` pattern in regular expressions in PromQL matches newline characters. With this change a regular expressions like `.*` matches strings that include - `\n`. This applies to matchers in queries and relabel configs. For example the - following regular expressions now match the accompanying strings, whereas in - Prometheus v2 these combinations didn't match. - - | Regex | Additional matches | - | ----- | ------ | - | ".*" | "foo\n", "Foo\nBar" | - | "foo.?bar" | "foo\nbar" | - | "foo.+bar" | "foo\nbar" | - - If you want Prometheus v3 to behave like v2 did, you will have to change your + `\n`. 
This applies to matchers in queries and relabel configs. + - For example, the following regular expressions now match the accompanying + strings, whereas in Prometheus v2 these combinations didn't match. + - `.*` additionally matches `foo\n` and `Foo\nBar` + - `foo.?bar` additionally matches `foo\nbar` + - `foo.+bar` additionally matches `foo\nbar` + - If you want Prometheus v3 to behave like v2, you will have to change your regular expressions by replacing all `.` patterns with `[^\n]`, e.g. `foo[^\n]*`. - Lookback and range selectors are left open and right closed (previously left @@ -73,11 +69,11 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 timeseries with evenly spaced samples exactly 1 minute apart. Before Prometheus v3, a range query with `5m` would usually return 5 samples. But if the query evaluation aligns perfectly with a scrape, it would return 6 samples. In - Prometheus v3 queries like this will always return 5 samples. + Prometheus v3 queries like this will always return 5 samples. This change has likely few effects for everyday use, except for some subquery - use cases. + use cases. Query front-ends that align queries usually align subqueries to multiples of - the step size. These subqueries will likely be affected. + the step size. These subqueries will likely be affected. Tests are more likely to affected. To fix those either adjust the expected number of samples or extend the range by less than one sample interval. 
- The `holt_winters` function has been renamed to `double_exponential_smoothing` From 67764d7cfd3b192865285c3f6b11944829cc00ba Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Mon, 11 Nov 2024 17:39:33 +0000 Subject: [PATCH 0913/1397] Better spacing for flags Signed-off-by: Fiona Liao --- docs/migration.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index 4d40a54814..f98e7d0cb1 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -18,19 +18,19 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 - `remote-write-receiver` - `new-service-discovery-manager` - `expand-external-labels` - Environment variable references `${var}` or `$var` in external label values + - Environment variable references `${var}` or `$var` in external label values are replaced according to the values of the current environment variables. - References to undefined variables are replaced by the empty string. + - References to undefined variables are replaced by the empty string. The `$` character can be escaped by using `$$`. - `no-default-scrape-port` - Prometheus v3 will no longer add ports to scrape targets according to the + - Prometheus v3 will no longer add ports to scrape targets according to the specified scheme. Target will now appear in labels as configured. - If you rely on scrape targets like - `https://example.com/metrics` or `http://exmaple.com/metrics` to be - represented as `https://example.com/metrics:443` and - `http://example.com/metrics:80` respectively, add them to your target URLs + - If you rely on scrape targets like + `https://example.com/metrics` or `http://exmaple.com/metrics` to be + represented as `https://example.com/metrics:443` and + `http://example.com/metrics:80` respectively, add them to your target URLs - `agent` - Instead use the dedicated `--agent` CLI flag. + - Instead use the dedicated `--agent` CLI flag. 
Prometheus v3 will log a warning if you continue to pass these to `--enable-feature`. From 5450e6d368e32e04f70f5a9d05e38796084eb7df Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 11 Nov 2024 18:41:30 +0000 Subject: [PATCH 0914/1397] [BUGFIX] TSDB: Apply fixes from loadWAL to loadWBL Move a couple of variables inside the scope of a goroutine, to avoid data races. Use `zeropool` to reduce garbage and avoid some lint warnings. Signed-off-by: Bryan Boreham --- tsdb/head_wal.go | 45 +++++++++++++++------------------------------ 1 file changed, 15 insertions(+), 30 deletions(-) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 8103926dc6..6744d582ae 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -656,32 +656,15 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch concurrency = h.opts.WALReplayConcurrency processors = make([]wblSubsetProcessor, concurrency) - dec record.Decoder shards = make([][]record.RefSample, concurrency) histogramShards = make([][]histogramRecord, concurrency) - decodedCh = make(chan interface{}, 10) - decodeErr error - samplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSample{} - }, - } - markersPool = sync.Pool{ - New: func() interface{} { - return []record.RefMmapMarker{} - }, - } - histogramSamplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefHistogramSample{} - }, - } - floatHistogramSamplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefFloatHistogramSample{} - }, - } + decodedCh = make(chan interface{}, 10) + decodeErr error + samplesPool zeropool.Pool[[]record.RefSample] + markersPool zeropool.Pool[[]record.RefMmapMarker] + histogramSamplesPool zeropool.Pool[[]record.RefHistogramSample] + floatHistogramSamplesPool zeropool.Pool[[]record.RefFloatHistogramSample] ) defer func() { @@ -711,11 +694,13 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch go func() { defer close(decodedCh) + var err 
error + dec := record.NewDecoder(syms) for r.Next() { rec := r.Record() switch dec.Type(rec) { case record.Samples: - samples := samplesPool.Get().([]record.RefSample)[:0] + samples := samplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -727,7 +712,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- samples case record.MmapMarkers: - markers := markersPool.Get().([]record.RefMmapMarker)[:0] + markers := markersPool.Get()[:0] markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -739,7 +724,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- markers case record.HistogramSamples: - hists := histogramSamplesPool.Get().([]record.RefHistogramSample)[:0] + hists := histogramSamplesPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -751,7 +736,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- hists case record.FloatHistogramSamples: - hists := floatHistogramSamplesPool.Get().([]record.RefFloatHistogramSample)[:0] + hists := floatHistogramSamplesPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -802,7 +787,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - samplesPool.Put(d) + samplesPool.Put(v) case []record.RefMmapMarker: markers := v for _, rm := range markers { @@ -857,7 +842,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - histogramSamplesPool.Put(v) //nolint:staticcheck + histogramSamplesPool.Put(v) case []record.RefFloatHistogramSample: samples := v // We split up the samples into chunks of 5000 samples or less. 
@@ -889,7 +874,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - floatHistogramSamplesPool.Put(v) //nolint:staticcheck + floatHistogramSamplesPool.Put(v) default: panic(fmt.Errorf("unexpected decodedCh type: %T", d)) } From 99cbdc113cb89727b46e8decd4a6f1b4e753d2d8 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 11 Nov 2024 19:32:48 +0000 Subject: [PATCH 0915/1397] Update migration.md for TSDB storage upgrade Signed-off-by: Bartlomiej Plotka --- docs/migration.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index 8a78ee63ab..7caa9449a1 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -104,19 +104,25 @@ may now fail if this fallback protocol is not specified. ## Miscellaneous ### TSDB format and downgrade -The TSDB format has been changed in Prometheus v2.55 in preparation for changes + +The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes to the index format. Consequently, a Prometheus v3 TSDB can only be read by a -Prometheus v2.55 or newer. -Before upgrading to Prometheus v3 please upgrade to v2.55 first and confirm -Prometheus works as expected. Only then continue with the upgrade to v3. +Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will be only +able to downgrade to v2.55, not lower, without loosing your TSDB persitent data. + +As an extra safety measure, you could optionally consider upgrading to v2.55 first and +confirm Prometheus works as expected, before upgrading to v3. ### TSDB storage contract + TSDB compatible storage is now expected to return results matching the specified selectors. This might impact some third party implementations, most likely implementing `remote_read`. + This contract is not explicitly enforced, but can cause undefined behavior. ### UTF-8 names + Prometheus v3 supports UTF-8 in metric and label names. 
This means metric and label names can change after upgrading according to what is exposed by endpoints. Furthermore, metric and label names that would have previously been From 55e34c8c5f1bfa5320aa965db883508f40a67147 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Fri, 8 Nov 2024 10:02:48 -0300 Subject: [PATCH 0916/1397] Allow UTF-8 characters in metric and label names as opt-in feature (#15258) * Allow UTF-8 characters in metric and label names as opt-in feature --------- Signed-off-by: Arthur Silva Sens --- config/config.go | 29 ++- config/config_test.go | 62 +++++++ config/testdata/otlp_allow_utf8.bad.yml | 4 + config/testdata/otlp_allow_utf8.good.yml | 2 + .../testdata/otlp_allow_utf8.incompatible.yml | 4 + documentation/examples/prometheus-otlp.yml | 31 ++++ .../prometheus/helpers_from_stdlib.go | 106 +++++++++++ .../prometheus/normalize_label.go | 4 +- .../prometheus/normalize_label_test.go | 27 +-- .../prometheus/normalize_name.go | 60 ++++--- .../prometheus/normalize_name_test.go | 165 ++++++++++-------- .../prometheusremotewrite/helper.go | 6 +- .../prometheusremotewrite/histograms_test.go | 2 +- .../prometheusremotewrite/metrics_to_prw.go | 3 +- .../otlp_to_openmetrics_metadata.go | 4 +- storage/remote/write_handler.go | 1 + 16 files changed, 396 insertions(+), 114 deletions(-) create mode 100644 config/testdata/otlp_allow_utf8.bad.yml create mode 100644 config/testdata/otlp_allow_utf8.good.yml create mode 100644 config/testdata/otlp_allow_utf8.incompatible.yml create mode 100644 documentation/examples/prometheus-otlp.yml create mode 100644 storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go diff --git a/config/config.go b/config/config.go index 30a74e0402..bc73f98d59 100644 --- a/config/config.go +++ b/config/config.go @@ -106,6 +106,18 @@ func Load(s string, logger *slog.Logger) (*Config, error) { if !b.Labels().IsEmpty() { cfg.GlobalConfig.ExternalLabels = b.Labels() } + + switch cfg.OTLPConfig.TranslationStrategy { + case 
UnderscoreEscapingWithSuffixes: + case "": + case NoUTF8EscapingWithSuffixes: + if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig { + return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled") + } + default: + return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) + } + return cfg, nil } @@ -239,7 +251,9 @@ var ( } // DefaultOTLPConfig is the default OTLP configuration. - DefaultOTLPConfig = OTLPConfig{} + DefaultOTLPConfig = OTLPConfig{ + TranslationStrategy: UnderscoreEscapingWithSuffixes, + } ) // Config is the top-level configuration for Prometheus's config files. @@ -1402,9 +1416,20 @@ func getGoGCEnv() int { return DefaultRuntimeConfig.GoGC } +type translationStrategyOption string + +var ( + // NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added. + NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" + // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. + // This option will translate all UTF-8 characters to underscores, while adding units and type suffixes. + UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" +) + // OTLPConfig is the configuration for writing to the OTLP endpoint. type OTLPConfig struct { - PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
diff --git a/config/config_test.go b/config/config_test.go index c3148f93a7..77cbf9b2eb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -168,6 +168,7 @@ var expectedConf = &Config{ PromoteResourceAttributes: []string{ "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", }, + TranslationStrategy: UnderscoreEscapingWithSuffixes, }, RemoteReadConfigs: []*RemoteReadConfig{ @@ -1553,6 +1554,67 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { }) } +func TestOTLPAllowUTF8(t *testing.T) { + t.Run("good config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml") + verify := func(t *testing.T, conf *Config, err error) { + t.Helper() + require.NoError(t, err) + require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy) + } + + t.Run("LoadFile", func(t *testing.T) { + conf, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, conf, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + conf, err := Load(string(content), promslog.NewNopLogger()) + verify(t, conf, err) + }) + }) + + t.Run("incompatible config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + t.Log("err", err) + verify(t, err) + }) + }) + + t.Run("bad config", func(t *testing.T) { + fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml") + verify := func(t *testing.T, err error) { + t.Helper() + require.ErrorContains(t, err, `unsupported 
OTLP translation strategy "Invalid"`) + } + + t.Run("LoadFile", func(t *testing.T) { + _, err := LoadFile(fpath, false, promslog.NewNopLogger()) + verify(t, err) + }) + t.Run("Load", func(t *testing.T) { + content, err := os.ReadFile(fpath) + require.NoError(t, err) + _, err = Load(string(content), promslog.NewNopLogger()) + verify(t, err) + }) + }) +} + func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. diff --git a/config/testdata/otlp_allow_utf8.bad.yml b/config/testdata/otlp_allow_utf8.bad.yml new file mode 100644 index 0000000000..488e4b0558 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.bad.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: Invalid diff --git a/config/testdata/otlp_allow_utf8.good.yml b/config/testdata/otlp_allow_utf8.good.yml new file mode 100644 index 0000000000..f3069d2fdd --- /dev/null +++ b/config/testdata/otlp_allow_utf8.good.yml @@ -0,0 +1,2 @@ +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/config/testdata/otlp_allow_utf8.incompatible.yml b/config/testdata/otlp_allow_utf8.incompatible.yml new file mode 100644 index 0000000000..2625c24131 --- /dev/null +++ b/config/testdata/otlp_allow_utf8.incompatible.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: legacy +otlp: + translation_strategy: NoUTF8EscapingWithSuffixes diff --git a/documentation/examples/prometheus-otlp.yml b/documentation/examples/prometheus-otlp.yml new file mode 100644 index 0000000000..f0a8ab8b11 --- /dev/null +++ b/documentation/examples/prometheus-otlp.yml @@ -0,0 +1,31 @@ +# my global config +global: + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + +otlp: + # Recommended attributes to be promoted to labels. 
+ promote_resource_attributes: + - service.instance.id + - service.name + - service.namespace + - cloud.availability_zone + - cloud.region + - container.name + - deployment.environment.name + - k8s.cluster.name + - k8s.container.name + - k8s.cronjob.name + - k8s.daemonset.name + - k8s.deployment.name + - k8s.job.name + - k8s.namespace.name + - k8s.pod.name + - k8s.replicaset.name + - k8s.statefulset.name + # Ingest OTLP data keeping UTF-8 characters in metric/label names. + translation_strategy: NoUTF8EscapingWithSuffixes + +storage: + # OTLP is a push-based protocol, Out of order samples is a common scenario. + tsdb: + out_of_order_time_window: 30m diff --git a/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go new file mode 100644 index 0000000000..cb9257d073 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go @@ -0,0 +1,106 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go +// Provenance-includes-license: BSD-3-Clause +// Provenance-includes-copyright: Copyright The Go Authors. + +package prometheus + +import "strings" + +// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library, +// but it also returns the separators as part of the result. 
+func fieldsFunc(s string, f func(rune) bool) ([]string, []string) { + // A span is used to record a slice of s of the form s[start:end]. + // The start index is inclusive and the end index is exclusive. + type span struct { + start int + end int + } + spans := make([]span, 0, 32) + separators := make([]string, 0, 32) + + // Find the field start and end indices. + // Doing this in a separate pass (rather than slicing the string s + // and collecting the result substrings right away) is significantly + // more efficient, possibly due to cache effects. + start := -1 // valid span start if >= 0 + for end, rune := range s { + if f(rune) { + if start >= 0 { + spans = append(spans, span{start, end}) + // Set start to a negative value. + // Note: using -1 here consistently and reproducibly + // slows down this code by a several percent on amd64. + start = ^start + separators = append(separators, string(s[end])) + } + } else { + if start < 0 { + start = end + } + } + } + + // Last field might end at EOF. + if start >= 0 { + spans = append(spans, span{start, len(s)}) + } + + // Create strings from recorded field indices. + a := make([]string, len(spans)) + for i, span := range spans { + a[i] = s[span.start:span.end] + } + + return a, separators +} + +// join is a copy of strings.Join from the Go standard library, +// but it also accepts a slice of separators to join the elements with. +// If the slice of separators is shorter than the slice of elements, use a default value. +// We also don't check for integer overflow. 
+func join(elems []string, separators []string, def string) string { + switch len(elems) { + case 0: + return "" + case 1: + return elems[0] + } + + var n int + var sep string + sepLen := len(separators) + for i, elem := range elems { + if i >= sepLen { + sep = def + } else { + sep = separators[i] + } + n += len(sep) + len(elem) + } + + var b strings.Builder + b.Grow(n) + b.WriteString(elems[0]) + for i, s := range elems[1:] { + if i >= sepLen { + sep = def + } else { + sep = separators[i] + } + b.WriteString(sep) + b.WriteString(s) + } + return b.String() +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index d5de2c7651..b928e6888d 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -29,9 +29,9 @@ import ( // // Labels that start with non-letter rune will be prefixed with "key_". // An exception is made for double-underscores which are allowed. 
-func NormalizeLabel(label string) string { +func NormalizeLabel(label string, allowUTF8 bool) string { // Trivial case - if len(label) == 0 { + if len(label) == 0 || allowUTF8 { return label } diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go index 21d4d6a6d8..19ab6cd173 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label_test.go @@ -22,24 +22,27 @@ import ( func TestNormalizeLabel(t *testing.T) { tests := []struct { - label string - expected string + label string + expected string + expectedUTF8 string }{ - {"", ""}, - {"label:with:colons", "label_with_colons"}, // Without UTF-8 support, colons are only allowed in metric names - {"LabelWithCapitalLetters", "LabelWithCapitalLetters"}, - {"label!with&special$chars)", "label_with_special_chars_"}, - {"label_with_foreign_characters_字符", "label_with_foreign_characters___"}, - {"label.with.dots", "label_with_dots"}, - {"123label", "key_123label"}, - {"_label_starting_with_underscore", "key_label_starting_with_underscore"}, - {"__label_starting_with_2underscores", "__label_starting_with_2underscores"}, + {"", "", ""}, + {"label:with:colons", "label_with_colons", "label:with:colons"}, // Without UTF-8 support, colons are only allowed in metric names + {"LabelWithCapitalLetters", "LabelWithCapitalLetters", "LabelWithCapitalLetters"}, + {"label!with&special$chars)", "label_with_special_chars_", "label!with&special$chars)"}, + {"label_with_foreign_characters_字符", "label_with_foreign_characters___", "label_with_foreign_characters_字符"}, + {"label.with.dots", "label_with_dots", "label.with.dots"}, + {"123label", "key_123label", "123label"}, + {"_label_starting_with_underscore", "key_label_starting_with_underscore", "_label_starting_with_underscore"}, + {"__label_starting_with_2underscores", "__label_starting_with_2underscores", 
"__label_starting_with_2underscores"}, } for i, test := range tests { t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { - result := NormalizeLabel(test.label) + result := NormalizeLabel(test.label, false) require.Equal(t, test.expected, result) + uTF8result := NormalizeLabel(test.label, true) + require.Equal(t, test.expectedUTF8, uTF8result) }) } } diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 0119b64dff..335705aa8d 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -88,27 +88,32 @@ var perUnitMap = map[string]string{ // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming // and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string { // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { - return normalizeName(metric, namespace) + return normalizeName(metric, namespace, allowUTF8) } - // Regexp for metric name characters that should be replaced with _. - invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) + var metricName string + if !allowUTF8 { + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) - // Simple case (no full normalization, no units, etc.). 
- metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") + // Simple case (no full normalization, no units, etc.). + metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") + } else { + metricName = metric.Name() + } // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + // Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore. + if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 { metricName = "_" + metricName } @@ -116,17 +121,18 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix } // Build a normalized name for the specified metric. -func normalizeName(metric pmetric.Metric, namespace string) string { - // Regexp for characters that can't be in a metric name token. - nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) - +func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { + var translationFunc func(rune) bool + if !allowUTF8 { + nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) + translationFunc = func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) } + } else { + translationFunc = func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } + } // Split metric name into "tokens" (of supported metric name runes). // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
- nameTokens := strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }, - ) + nameTokens, separators := fieldsFunc(metric.Name(), translationFunc) // Split unit at the '/' if any unitTokens := strings.SplitN(metric.Unit(), "/", 2) @@ -137,7 +143,10 @@ func normalizeName(metric pmetric.Metric, namespace string) string { var mainUnitProm, perUnitProm string mainUnitOTel := strings.TrimSpace(unitTokens[0]) if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm = cleanUpUnit(unitMapGetOrDefault(mainUnitOTel)) + mainUnitProm = unitMapGetOrDefault(mainUnitOTel) + if !allowUTF8 { + mainUnitProm = cleanUpUnit(mainUnitProm) + } if slices.Contains(nameTokens, mainUnitProm) { mainUnitProm = "" } @@ -148,7 +157,10 @@ func normalizeName(metric pmetric.Metric, namespace string) string { if len(unitTokens) > 1 && unitTokens[1] != "" { perUnitOTel := strings.TrimSpace(unitTokens[1]) if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm = cleanUpUnit(perUnitMapGetOrDefault(perUnitOTel)) + perUnitProm = perUnitMapGetOrDefault(perUnitOTel) + if !allowUTF8 { + perUnitProm = cleanUpUnit(perUnitProm) + } } if perUnitProm != "" { perUnitProm = "per_" + perUnitProm @@ -189,8 +201,12 @@ func normalizeName(metric pmetric.Metric, namespace string) string { nameTokens = append([]string{namespace}, nameTokens...) } - // Build the string from the tokens, separated with underscores - normalizedName := strings.Join(nameTokens, "_") + // Build the string from the tokens + separators. + // If UTF-8 isn't allowed, we'll use underscores as separators. 
+ if !allowUTF8 { + separators = []string{} + } + normalizedName := join(nameTokens, separators, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 2d5648e84c..d97e7a560a 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -25,92 +25,119 @@ import ( ) func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "", false)) } func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) - require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "", false)) } func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "", false)) } func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false)) } func TestNonStandardUnitCounter(t *testing.T) 
{ - require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "", false)) } func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "", false)) } func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "", false)) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "", false)) } func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", 
normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) - require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) - require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "", false)) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "", false)) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "", false)) } func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "", false)) } func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) - require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false)) + require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false)) } func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) - require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "", false)) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "", false)) } func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", 
normalizeName(createGauge("test.metric.no_unit", ""), "")) - require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "", false)) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "", false)) } -func TestUnsupportedRunes(t *testing.T) { - require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) - require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) - require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) +func TestAllowUTF8(t *testing.T) { + t.Run("allow UTF8", func(t *testing.T) { + require.Equal(t, "unsupported.metric.temperature_°F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", true)) + require.Equal(t, "unsupported.metric.weird_+=.:,!* & #", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", true)) + require.Equal(t, "unsupported.metric.redundant___test $_per_°C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", true)) + require.Equal(t, "metric_with_字符_foreign_characters_ど", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", true)) + }) + t.Run("disallow UTF8", func(t *testing.T) { + require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false)) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", false)) + require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false)) + require.Equal(t, "metric_with_foreign_characters", 
normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false)) + }) +} + +func TestAllowUTF8KnownBugs(t *testing.T) { + // Due to historical reasons, the translator code was copied from OpenTelemetry collector codebase. + // Over there, they tried to provide means to translate metric names following Prometheus conventions that are documented here: + // https://prometheus.io/docs/practices/naming/ + // + // Although not explicitly stated, it was implied that words should be separated by a single underscore and the codebase was written + // with that in mind. + // + // Now that we're allowing OTel users to have their original names stored in prometheus without any transformation, we're facing problems + // where two (or more) UTF-8 characters are being used to separate words. + // TODO(arthursens): Fix it! + + // We're asserting on 'NotEqual', which proves the bug. + require.NotEqual(t, "metric....split_=+by_//utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) + // Here we're asserting on 'Equal', showing the current behavior. 
+ require.Equal(t, "metric.split_by_utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) } func TestOTelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) - require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) - require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) - require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) - require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) - require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) - require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) - require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) - require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) - require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) - require.Equal(t, "memcached_operation_hit_ratio_percent", 
normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) - require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) - require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) - require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) - require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) - require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) - require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) - require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) - require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "", false)) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "", false)) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "", false)) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", 
normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "", false)) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "", false)) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "", false)) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "", false)) + require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "", false)) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "", false)) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "", false)) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "", false)) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "", false)) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "", false)) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "", false)) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "", false)) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "", false)) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "", false)) + require.Equal(t, 
"mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "", false)) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "", false)) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "", false)) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "", false)) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false)) } func TestTrimPromSuffixes(t *testing.T) { @@ -144,8 +171,8 @@ func TestTrimPromSuffixes(t *testing.T) { } func TestNamespace(t *testing.T) { - require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) - require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false)) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false)) } func TestCleanUpUnit(t *testing.T) { @@ -180,28 +207,28 @@ func TestRemoveItem(t *testing.T) { } func TestBuildCompliantNameWithSuffixes(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true)) - require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) - require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) - require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) + require.Equal(t, 
"system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true, false)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true, false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true, false)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true, false)) + require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true, false)) // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) + require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true, false)) // Slashes in units are converted. - require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true)) - require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) + require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true, false)) + require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true, false)) } func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false)) - require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false)) - require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false)) - 
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) - require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) - require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false, false)) + require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false, false)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false, false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false, false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false, false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false, false)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false, false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false, false)) + require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false, false)) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index f7fede258b..30cfa86436 100644 --- 
a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -157,7 +157,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting // map ensures no duplicate label names. l := make(map[string]string, maxLabelCount) for _, label := range labels { - var finalKey = prometheustranslator.NormalizeLabel(label.Name) + var finalKey = prometheustranslator.NormalizeLabel(label.Name, settings.AllowUTF8) if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value } else { @@ -166,7 +166,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } for _, lbl := range promotedAttrs { - normalized := prometheustranslator.NormalizeLabel(lbl.Name) + normalized := prometheustranslator.NormalizeLabel(lbl.Name, settings.AllowUTF8) if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value } @@ -205,7 +205,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } // internal labels should be maintained if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { - name = prometheustranslator.NormalizeLabel(name) + name = prometheustranslator.NormalizeLabel(name, settings.AllowUTF8) } l[name] = extras[i+1] } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index 5fdd26ef29..dcd83b7f93 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -762,7 +762,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { Settings{ ExportCreatedMetric: true, }, - prometheustranslator.BuildCompliantName(metric, "", true), + prometheustranslator.BuildCompliantName(metric, "", true, true), ) require.NoError(t, err) require.Empty(t, annots) 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 0afd2ad57e..4f8baf3100 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -38,6 +38,7 @@ type Settings struct { ExportCreatedMetric bool AddMetricSuffixes bool SendMetadata bool + AllowUTF8 bool PromoteResourceAttributes []string } @@ -84,7 +85,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) // handle individual metrics based on type //exhaustive:enforce diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index ba48704193..b423d2cc6e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -43,7 +43,7 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta return prompb.MetricMetadata_UNKNOWN } -func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb.MetricMetadata { +func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes, allowUTF8 bool) []*prompb.MetricMetadata { resourceMetricsSlice := md.ResourceMetrics() metadataLength := 0 @@ -65,7 +65,7 @@ func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes bool) []*prompb metric := scopeMetrics.Metrics().At(k) entry := prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: 
prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes), + MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes, allowUTF8), Help: metric.Description(), } metadata = append(metadata, &entry) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 466673c99d..87102a374b 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -513,6 +513,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { converter := otlptranslator.NewPrometheusConverter() annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{ AddMetricSuffixes: true, + AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, }) if err != nil { From e08794c53f8aae120833ca5bf8e50336465cf268 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Mon, 11 Nov 2024 21:44:08 +0000 Subject: [PATCH 0917/1397] Added config entry. Signed-off-by: bwplotka --- docs/configuration/configuration.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 2093ed8836..2d1e4b1801 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -171,8 +171,17 @@ remote_write: [ - ... ] # Settings related to the OTLP receiver feature. +# See https://prometheus.io/docs/guides/opentelemetry/ for best practices. otlp: [ promote_resource_attributes: [, ...] | default = [ ] ] + # Configures translation of OTLP metrics when received through the OTLP metrics + # endpoint. Available values: + # - "UnderscoreEscapingWithSuffixes" refers to commonly agreed normalization used + # by OpenTelemetry in https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus + # - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus. 
+ # It preserves all special characters like dots, but it still adds required suffixes + # for units and _total like in UnderscoreEscapingWithSuffixes. + [ translation_strategy: | default = "UnderscoreEscapingWithSuffixes" ] # Settings related to the remote read feature. remote_read: From 3067d0bc2fedb199b442073e76ade24cdb16ddaf Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Mon, 11 Nov 2024 17:45:12 +0100 Subject: [PATCH 0918/1397] update CHANGELOG and migration guide Signed-off-by: Jan Fajerski --- CHANGELOG.md | 5 +++++ docs/migration.md | 8 ++++++++ 2 files changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43c0aa371e..8c634c1de5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,12 @@ ## unreleased +## 3.0.0-rc.1 / 2024-11-11 + +* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `-no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376 +* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `-no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373 * [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 +* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341 ## 3.0.0-rc.0 / 2024-10-31 diff --git a/docs/migration.md b/docs/migration.md index 7caa9449a1..b3e7ce4310 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -31,6 +31,14 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 `http://example.com/metrics:80` respectively, add them to your target URLs - `agent` Instead use the dedicated `--agent` CLI flag. + - `auto-gomemlimit` + Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux + container memory limit. If there is no container limit, or the process is + running outside of containers, the system memory total is used. To disable + this, `--no-auto-gomemlimit` is available. 
+ - `auto-gomaxprocs` + Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux + container CPU quota. To disable this, `--no-auto-gomaxprocs` is available. Prometheus v3 will log a warning if you continue to pass these to `--enable-feature`. From 60489308a1c66299183ab12d32da0bb6a44279b4 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Mon, 11 Nov 2024 17:46:08 +0100 Subject: [PATCH 0919/1397] Prepare v3.0.0-rc.1 Signed-off-by: Jan Fajerski --- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package-lock.json | 4 ++-- web/ui/react-app/package.json | 2 +- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/VERSION b/VERSION index 3c6eac305d..8443074c0f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0-rc.0 +3.0.0-rc.1 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 679685a50f..49ee44a499 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0-rc.0", + "@prometheus-io/codemirror-promql": "0.300.0-rc.1", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index feb75d73fe..8e9be9b760 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0-rc.0", + "version": 
"0.300.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-rc.0", + "@prometheus-io/lezer-promql": "0.300.0-rc.1", "lru-cache": "^11.0.1" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 49e54c40af..ef3018964f 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 242179a083..feee0d6a31 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0-rc.0", + "@prometheus-io/codemirror-promql": "0.300.0-rc.1", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", @@ -155,10 +155,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0-rc.0", + 
"version": "0.300.0-rc.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-rc.0", + "@prometheus-io/lezer-promql": "0.300.0-rc.1", "lru-cache": "^11.0.1" }, "devDependencies": { @@ -188,7 +188,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", diff --git a/web/ui/package.json b/web/ui/package.json index ab23c6363b..b9a6bc9ac9 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "private": true, "scripts": { "build": "bash build_ui.sh --all", diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index a4c5af6ce9..ce18939428 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 9711a09e83..c61358e1de 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.300.0-rc.0", + "version": "0.300.0-rc.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", From 7311ce3c007b9257b6c4d70448dfedc4d83db11b Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 11 Nov 2024 17:46:19 +0000 Subject: [PATCH 0920/1397] Update CHANGELOG.md Co-authored-by: Ben Kochie Signed-off-by: Bartlomiej Plotka --- CHANGELOG.md | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c634c1de5..a86b997cfc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,8 @@ ## 3.0.0-rc.1 / 2024-11-11 -* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `-no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376 -* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `-no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373 +* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376 +* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373 * [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 * [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341 From 22eec4a390b5fc8466ae5045cbf46f6ba29201c7 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Mon, 11 Nov 2024 22:34:49 +0000 Subject: [PATCH 0921/1397] Updated CHANGELOG.md Signed-off-by: bwplotka --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a86b997cfc..b9766854cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ * [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376 * [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373 +* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. 
#15384 * [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 * [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341 From ebc45750efe25ac37fc0faf64c7f08653c634eb4 Mon Sep 17 00:00:00 2001 From: Julien Date: Tue, 12 Nov 2024 10:34:58 +0100 Subject: [PATCH 0922/1397] Fix release date Closes #15385 Signed-off-by: Julien --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19af3f460f..2c4db0eb86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## unreleased -## 2.55.1 / 2024-01-04 +## 2.55.1 / 2024-11-04 * [BUGFIX] `round()` function did not remove `__name__` label. #15250 From b93aec077cf16d2877cb2bd34b17ac11fb608c2d Mon Sep 17 00:00:00 2001 From: Julien Date: Mon, 30 Sep 2024 12:55:11 +0200 Subject: [PATCH 0923/1397] Fix auto reload when a config file with a syntax error is reverted When we had a syntax error but restored the old file, we did not re-trigger the config reload, so the config reload metric was showing that config reload was unsucessful. I made magic to handle logs in cmd/prometheus. For now it is a separate file so we can backport this easily. I will generalize the helper in another PR. 
Signed-off-by: Julien --- cmd/prometheus/main.go | 11 +- cmd/prometheus/reload_test.go | 229 ++++++++++++++++++++++++++++++++++ 2 files changed, 234 insertions(+), 6 deletions(-) create mode 100644 cmd/prometheus/reload_test.go diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 3d999b1d0a..bcfbe24a6a 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -1152,9 +1152,8 @@ func main() { if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { logger.Error("Error reloading config", "err", err) } else if cfg.enableAutoReload { - if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { - checksum = currentChecksum - } else { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { logger.Error("Failed to generate checksum during configuration reload", "err", err) } } @@ -1165,9 +1164,8 @@ func main() { } else { rc <- nil if cfg.enableAutoReload { - if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { - checksum = currentChecksum - } else { + checksum, err = config.GenerateChecksum(cfg.configFile) + if err != nil { logger.Error("Failed to generate checksum during configuration reload", "err", err) } } @@ -1178,6 +1176,7 @@ func main() { } currentChecksum, err := config.GenerateChecksum(cfg.configFile) if err != nil { + checksum = currentChecksum logger.Error("Failed to generate checksum during configuration reload", "err", err) } else if currentChecksum == checksum { continue diff --git a/cmd/prometheus/reload_test.go b/cmd/prometheus/reload_test.go new file mode 100644 index 0000000000..18a7ff2ad1 --- /dev/null +++ b/cmd/prometheus/reload_test.go @@ -0,0 +1,229 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/util/testutil" +) + +const configReloadMetric = "prometheus_config_last_reload_successful" + +func TestAutoReloadConfig_ValidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +`, + expectedInterval: "15s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) { + steps := []struct { + configText string + expectedInterval string + expectedMetric float64 + }{ + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + { + configText: ` +global: + scrape_interval: 15s +invalid_syntax +`, + expectedInterval: "30s", + expectedMetric: 0, + }, + { + configText: ` +global: + scrape_interval: 30s +`, + expectedInterval: "30s", + expectedMetric: 1, + }, + } + + runTestSteps(t, steps) +} + +func runTestSteps(t *testing.T, steps []struct { + configText string + expectedInterval string + expectedMetric float64 +}, +) { + configDir := t.TempDir() + configFilePath 
:= filepath.Join(configDir, "prometheus.yml") + + t.Logf("Config file path: %s", configFilePath) + + require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file") + + port := testutil.RandomUnprivilegedPort(t) + runPrometheusWithLogging(t, configFilePath, port) + + baseURL := "http://localhost:" + strconv.Itoa(port) + require.Eventually(t, func() bool { + resp, err := http.Get(baseURL + "/-/ready") + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK + }, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time") + + for i, step := range steps { + t.Logf("Step %d", i) + require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step") + + require.Eventually(t, func() bool { + return verifyScrapeInterval(t, baseURL, step.expectedInterval) && + verifyConfigReloadMetric(t, baseURL, step.expectedMetric) + }, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time") + } +} + +func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool { + resp, err := http.Get(baseURL + "/api/v1/status/config") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + config := struct { + Data struct { + YAML string `json:"yaml"` + } `json:"data"` + }{} + + require.NoError(t, json.Unmarshal(body, &config)) + return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval) +} + +func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool { + resp, err := http.Get(baseURL + "/metrics") + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + lines := string(body) + var actualValue float64 + found := false + + for _, line := range strings.Split(lines, "\n") { + if strings.HasPrefix(line, 
configReloadMetric) { + parts := strings.Fields(line) + if len(parts) >= 2 { + actualValue, err = strconv.ParseFloat(parts[1], 64) + require.NoError(t, err) + found = true + break + } + } + } + + return found && actualValue == expectedValue +} + +func captureLogsToTLog(t *testing.T, r io.Reader) { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + t.Log(scanner.Text()) + } + if err := scanner.Err(); err != nil { + t.Logf("Error reading logs: %v", err) + } +} + +func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) { + stdoutPipe, stdoutWriter := io.Pipe() + stderrPipe, stderrWriter := io.Pipe() + + var wg sync.WaitGroup + wg.Add(2) + + prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port)) + prom.Stdout = stdoutWriter + prom.Stderr = stderrWriter + + go func() { + defer wg.Done() + captureLogsToTLog(t, stdoutPipe) + }() + go func() { + defer wg.Done() + captureLogsToTLog(t, stderrPipe) + }() + + t.Cleanup(func() { + prom.Process.Kill() + prom.Wait() + stdoutWriter.Close() + stderrWriter.Close() + wg.Wait() + }) + + require.NoError(t, prom.Start()) +} From 869addfec82d5503b4e3f2728c0b16a0926ce883 Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Tue, 12 Nov 2024 13:27:01 +0100 Subject: [PATCH 0924/1397] Enable auto-gomaxprocs by default (#15378) Enable the `auto-gomaxprocs` feature flag by default. * Add command line flag `--no-auto-gomaxprocs` to disable. 
Signed-off-by: SuperQ --- cmd/prometheus/main.go | 9 ++++----- docs/command-line/prometheus.md | 1 + docs/feature_flags.md | 6 ------ 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 9049d4d291..dbb513bbd0 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -195,6 +195,7 @@ type flagConfig struct { enableAutoReload bool autoReloadInterval model.Duration + maxprocsEnable bool memlimitEnable bool memlimitRatio float64 @@ -202,7 +203,6 @@ type flagConfig struct { // These options are extracted from featureList // for ease of use. enablePerStepStats bool - enableAutoGOMAXPROCS bool enableConcurrentRuleEval bool prometheusURL string @@ -234,9 +234,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { case "promql-per-step-stats": c.enablePerStepStats = true logger.Info("Experimental per-step statistics reporting") - case "auto-gomaxprocs": - c.enableAutoGOMAXPROCS = true - logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota") case "auto-reload-config": c.enableAutoReload = true if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 { @@ -329,6 +326,8 @@ func main() { a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) + a.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match Linux container CPU quota"). + Default("true").BoolVar(&cfg.maxprocsEnable) a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit"). Default("true").BoolVar(&cfg.memlimitEnable) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). 
@@ -756,7 +755,7 @@ func main() { ruleManager *rules.Manager ) - if cfg.enableAutoGOMAXPROCS { + if cfg.maxprocsEnable { l := func(format string, a ...interface{}) { logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") } diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 9f5400c414..dd207dc382 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -17,6 +17,7 @@ The Prometheus monitoring server | --config.file | Prometheus configuration file path. | `prometheus.yml` | | --config.auto-reload-interval | Specifies the interval for checking and automatically reloading the Prometheus configuration file upon detecting changes. | `30s` | | --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | +| --auto-gomaxprocs | Automatically set GOMAXPROCS to match Linux container CPU quota | `true` | | --auto-gomemlimit | Automatically set GOMEMLIMIT to match Linux container or system memory limit | `true` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 0541961f26..8c0e319f9c 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -47,12 +47,6 @@ statistics. Currently this is limited to totalQueryableSamples. When disabled in either the engine or the query, per-step statistics are not computed at all. -## Auto GOMAXPROCS - -`--enable-feature=auto-gomaxprocs` - -When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota. 
- ## Native Histograms `--enable-feature=native-histograms` From 628bbd2682802d6774f7a20d8569c7790dac2e0b Mon Sep 17 00:00:00 2001 From: Ben Kochie Date: Tue, 12 Nov 2024 13:27:39 +0100 Subject: [PATCH 0925/1397] Update linting (#15369) * Ignore all node_modules dirs. * Update Makefile.common golangci-lint to match CI pipeline. Signed-off-by: SuperQ --- .yamllint | 2 +- Makefile.common | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.yamllint b/.yamllint index 1859cb624b..8d09c375fd 100644 --- a/.yamllint +++ b/.yamllint @@ -1,7 +1,7 @@ --- extends: default ignore: | - ui/react-app/node_modules + **/node_modules rules: braces: diff --git a/Makefile.common b/Makefile.common index cbb5d86382..09e5bff858 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.60.2 +GOLANGCI_LINT_VERSION ?= v1.61.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) From 003a2270e9308728e163760ef149e09aa37129e4 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 6 Nov 2024 15:51:39 +0000 Subject: [PATCH 0926/1397] [BUGFIX] TSDB: Fix race on stale values in headAppender (#15322) * [BUGFIX] TSDB: Fix race on stale values in headAppender Signed-off-by: Bryan Boreham * Simplify Signed-off-by: Bryan Boreham --------- Signed-off-by: Bryan Boreham (cherry picked from commit f42b37ff2fe56a92cfa1de64f814b5b5c9528e7d) --- tsdb/head_append.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 603b96cfcc..ea2a163f26 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -356,21 +356,21 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } } + s.Lock() if value.IsStaleNaN(v) { - // This is not thread safe as we should be holding the lock for "s". // TODO(krajorama): reorganize Commit() to handle samples in append order // not floats first and then histograms. Then we could do this conversion // in commit. This code should move into Commit(). switch { case s.lastHistogramValue != nil: + s.Unlock() return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil) case s.lastFloatHistogramValue != nil: + s.Unlock() return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v}) } } - s.Lock() - defer s.Unlock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. @@ -1517,7 +1517,7 @@ type chunkOpts struct { // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) -// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. +// Series lock must be held when calling. 
func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) { c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o) if !sampleInOrder { From c599d37668112c3ae4f0ffd6539b3d243642019d Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Tue, 12 Nov 2024 14:14:06 +0000 Subject: [PATCH 0927/1397] Always return unknown hint for first sample in non-gauge histogram chunk (#15343) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always return unknown hint for first sample in non-gauge histogram chunk --------- Signed-off-by: Fiona Liao Co-authored-by: György Krajcsovits --- tsdb/chunkenc/float_histogram_test.go | 43 ++- tsdb/chunkenc/histogram_meta.go | 22 +- tsdb/chunkenc/histogram_test.go | 43 ++- tsdb/db_test.go | 453 ++++++++++++++++++++++---- tsdb/head_test.go | 4 +- tsdb/ooo_head_test.go | 26 +- tsdb/testutil.go | 6 +- 7 files changed, 490 insertions(+), 107 deletions(-) diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 6092c0f636..1416b8d085 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -31,22 +31,27 @@ func TestFirstFloatHistogramExplicitCounterReset(t *testing.T) { tests := map[string]struct { hint histogram.CounterResetHint expHeader CounterResetHeader + expHint histogram.CounterResetHint }{ "CounterReset": { hint: histogram.CounterReset, expHeader: CounterReset, + expHint: histogram.UnknownCounterReset, }, "NotCounterReset": { hint: histogram.NotCounterReset, expHeader: UnknownCounterReset, + expHint: histogram.UnknownCounterReset, }, "UnknownCounterReset": { hint: histogram.UnknownCounterReset, expHeader: UnknownCounterReset, + expHint: histogram.UnknownCounterReset, }, "Gauge": { hint: histogram.GaugeType, expHeader: GaugeType, + expHint: histogram.GaugeType, }, } for name, test := range tests { @@ -63,6 +68,7 @@ func 
TestFirstFloatHistogramExplicitCounterReset(t *testing.T) { require.False(t, recoded) require.Equal(t, app, newApp) require.Equal(t, test.expHeader, chk.GetCounterResetHeader()) + assertFirstFloatHistogramSampleHint(t, chk, test.expHint) }) } } @@ -338,7 +344,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset) } { // Zero threshold change. @@ -348,7 +354,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset) } { // New histogram that has more buckets. @@ -397,7 +403,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // New histogram that has buckets missing but the buckets missing were empty. @@ -480,7 +486,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // New histogram that has a counter reset while new buckets were added. @@ -503,7 +509,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { @@ -532,7 +538,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // New histogram that has an explicit counter reset. @@ -540,7 +546,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2 := h1.Copy() h2.CounterResetHint = histogram.CounterReset - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk. @@ -557,6 +563,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.Equal(t, app, newApp) assertSampleCount(t, nextChunk, 1, ValFloatHistogram) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) + assertFirstFloatHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset) } { // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk. @@ -574,6 +581,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.Equal(t, app, newApp) assertSampleCount(t, nextChunk, 1, ValFloatHistogram) require.Equal(t, CounterReset, nextChunk.GetCounterResetHeader()) + assertFirstFloatHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset) } { // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk. 
@@ -600,6 +608,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.Equal(t, app, newApp) assertSampleCount(t, nextChunk, 1, ValFloatHistogram) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) + assertFirstFloatHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset) } { @@ -664,7 +673,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // Custom buckets, change only in custom bounds. @@ -674,7 +683,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // Custom buckets, with more buckets. @@ -705,7 +714,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { } } -func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) { +func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader, expectHint histogram.CounterResetHint) { oldChunkBytes := oldChunk.Bytes() newChunk, recoded, newAppender, err := hApp.AppendFloatHistogram(nil, ts, h, false) require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched. 
@@ -717,6 +726,7 @@ func assertNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Fl require.NotNil(t, newAppender) require.NotEqual(t, hApp, newAppender) assertSampleCount(t, newChunk, 1, ValFloatHistogram) + assertFirstFloatHistogramSampleHint(t, newChunk, expectHint) } func assertNoNewFloatHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *FloatHistogramAppender, ts int64, h *histogram.FloatHistogram, expectHeader CounterResetHeader) { @@ -1023,7 +1033,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType, histogram.GaugeType) } { // Zero threshold change. @@ -1033,7 +1043,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType, histogram.GaugeType) } { // New histogram that has more buckets. @@ -1208,7 +1218,7 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType, histogram.GaugeType) } { // Custom buckets, with more buckets. 
@@ -1357,3 +1367,10 @@ func TestFloatHistogramUniqueSpansAfterNext(t *testing.T) { require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") } + +func assertFirstFloatHistogramSampleHint(t *testing.T, chunk Chunk, expected histogram.CounterResetHint) { + it := chunk.Iterator(nil) + require.Equal(t, ValFloatHistogram, it.Next()) + _, v := it.AtFloatHistogram(nil) + require.Equal(t, expected, v.CounterResetHint) +} diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go index 8d614b817f..7bb31acf00 100644 --- a/tsdb/chunkenc/histogram_meta.go +++ b/tsdb/chunkenc/histogram_meta.go @@ -558,26 +558,16 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR // In a counter histogram chunk, there will not be any counter // resets after the first histogram. return histogram.NotCounterReset - case crh == CounterReset: - // If the chunk was started because of a counter reset, we can - // safely return that hint. This histogram always has to be - // treated as a counter reset. - return histogram.CounterReset default: // Sadly, we have to return "unknown" as the hint for all other - // cases, even if we know that the chunk was started without a + // cases, even if we know that the chunk was started with or without a // counter reset. But we cannot be sure that the previous chunk - // still exists in the TSDB, so we conservatively return - // "unknown". On the bright side, this case should be relatively - // rare. + // still exists in the TSDB, or if the previous chunk was added later + // by out of order or backfill, so we conservatively return "unknown". 
// - // TODO(beorn7): Nevertheless, if the current chunk is in the - // middle of a block (not the first chunk in the block for this - // series), it's probably safe to assume that the previous chunk - // will exist in the TSDB for as long as the current chunk - // exist, and we could safely return - // "histogram.NotCounterReset". This needs some more work and - // might not be worth the effort and/or risk. To be vetted... + // TODO: If we can detect whether the previous and current chunk are + // actually consecutive then we could trust its hint: + // https://github.com/prometheus/prometheus/issues/15346. return histogram.UnknownCounterReset } } diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 29b77b1583..9ce2bf4e60 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -32,22 +32,27 @@ func TestFirstHistogramExplicitCounterReset(t *testing.T) { tests := map[string]struct { hint histogram.CounterResetHint expHeader CounterResetHeader + expHint histogram.CounterResetHint }{ "CounterReset": { hint: histogram.CounterReset, expHeader: CounterReset, + expHint: histogram.UnknownCounterReset, }, "NotCounterReset": { hint: histogram.NotCounterReset, expHeader: UnknownCounterReset, + expHint: histogram.UnknownCounterReset, }, "UnknownCounterReset": { hint: histogram.UnknownCounterReset, expHeader: UnknownCounterReset, + expHint: histogram.UnknownCounterReset, }, "Gauge": { hint: histogram.GaugeType, expHeader: GaugeType, + expHint: histogram.GaugeType, }, } for name, test := range tests { @@ -64,6 +69,7 @@ func TestFirstHistogramExplicitCounterReset(t *testing.T) { require.False(t, recoded) require.Equal(t, app, newApp) require.Equal(t, test.expHeader, chk.GetCounterResetHeader()) + assertFirstIntHistogramSampleHint(t, chk, test.expHint) }) } } @@ -352,7 +358,7 @@ func TestHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - 
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset) } { // Zero threshold change. @@ -362,7 +368,7 @@ func TestHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset, histogram.UnknownCounterReset) } { // New histogram that has more buckets. @@ -413,7 +419,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // New histogram that has buckets missing but the buckets missing were empty. @@ -498,7 +504,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // New histogram that has a counter reset while new buckets were added. @@ -524,7 +530,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { @@ -556,7 +562,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // New histogram that has an explicit counter reset. @@ -564,7 +570,7 @@ func TestHistogramChunkAppendable(t *testing.T) { h2 := h1.Copy() h2.CounterResetHint = histogram.CounterReset - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // Start new chunk explicitly, and append a new histogram that is considered appendable to the previous chunk. @@ -581,6 +587,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.Equal(t, app, newApp) assertSampleCount(t, nextChunk, 1, ValHistogram) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) + assertFirstIntHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset) } { // Start new chunk explicitly, and append a new histogram that is not considered appendable to the previous chunk. @@ -598,6 +605,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.Equal(t, app, newApp) assertSampleCount(t, nextChunk, 1, ValHistogram) require.Equal(t, CounterReset, nextChunk.GetCounterResetHeader()) + assertFirstIntHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset) } { // Start new chunk explicitly, and append a new histogram that would need recoding if we added it to the chunk. 
@@ -627,6 +635,7 @@ func TestHistogramChunkAppendable(t *testing.T) { require.Equal(t, app, newApp) assertSampleCount(t, nextChunk, 1, ValHistogram) require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) + assertFirstIntHistogramSampleHint(t, nextChunk, histogram.UnknownCounterReset) } { @@ -691,7 +700,7 @@ func TestHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // Custom buckets, change only in custom bounds. @@ -701,7 +710,7 @@ func TestHistogramChunkAppendable(t *testing.T) { _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset, histogram.UnknownCounterReset) } { // Custom buckets, with more buckets. @@ -732,7 +741,7 @@ func TestHistogramChunkAppendable(t *testing.T) { } } -func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { +func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader, expectHint histogram.CounterResetHint) { oldChunkBytes := oldChunk.Bytes() newChunk, recoded, newAppender, err := hApp.AppendHistogram(nil, ts, h, false) require.Equal(t, oldChunkBytes, oldChunk.Bytes()) // Sanity check that previous chunk is untouched. 
@@ -744,6 +753,7 @@ func assertNewHistogramChunkOnAppend(t *testing.T, oldChunk Chunk, hApp *Histogr require.NotNil(t, newAppender) require.NotEqual(t, hApp, newAppender) assertSampleCount(t, newChunk, 1, ValHistogram) + assertFirstIntHistogramSampleHint(t, newChunk, expectHint) } func assertNoNewHistogramChunkOnAppend(t *testing.T, currChunk Chunk, hApp *HistogramAppender, ts int64, h *histogram.Histogram, expectHeader CounterResetHeader) { @@ -1203,7 +1213,7 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType, histogram.GaugeType) } { // Zero threshold change. @@ -1213,7 +1223,7 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType, histogram.GaugeType) } { // New histogram that has more buckets. @@ -1388,7 +1398,7 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { _, _, _, _, _, _, ok := hApp.appendableGauge(h2) require.False(t, ok) - assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) + assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType, histogram.GaugeType) } { // Custom buckets, with more buckets. 
@@ -1635,3 +1645,10 @@ func BenchmarkAppendable(b *testing.B) { b.Fail() } } + +func assertFirstIntHistogramSampleHint(t *testing.T, chunk Chunk, expected histogram.CounterResetHint) { + it := chunk.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, v := it.AtHistogram(nil) + require.Equal(t, expected, v.CounterResetHint) +} diff --git a/tsdb/db_test.go b/tsdb/db_test.go index f851f2b911..306dc4579e 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -6121,7 +6121,7 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS shouldReset: func(v int64) bool { return v == 45 }, - expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.CounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, + expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset}, }, }, }, @@ -6241,6 +6241,242 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS } } +func TestOOOInterleavedImplicitCounterResets(t *testing.T) { + for name, scenario := range sampleTypeScenarios { + t.Run(name, func(t *testing.T) { + testOOOInterleavedImplicitCounterResets(t, name, scenario) + }) + } +} + +func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario sampleTypeScenario) { + var appendFunc func(app storage.Appender, ts, v int64) error + + if scenario.sampleType != sampleMetricTypeHistogram { + return + } + + switch name { + case intHistogram: + appendFunc = func(app storage.Appender, ts, v int64) error { + h := &histogram.Histogram{ + Count: 
uint64(v), + Sum: float64(v), + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{v}, + } + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) + return err + } + case floatHistogram: + appendFunc = func(app storage.Appender, ts, v int64) error { + fh := &histogram.FloatHistogram{ + Count: float64(v), + Sum: float64(v), + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []float64{float64(v)}, + } + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) + return err + } + case gaugeIntHistogram, gaugeFloatHistogram: + return + } + + // Not a sample, we're encoding an integer counter that we convert to a + // histogram with a single bucket. + type tsValue struct { + ts int64 + v int64 + } + + type expectedTsValue struct { + ts int64 + v int64 + hint histogram.CounterResetHint + } + + type expectedChunk struct { + hint histogram.CounterResetHint + size int + } + + cases := map[string]struct { + samples []tsValue + oooCap int64 + // The expected samples with counter reset. + expectedSamples []expectedTsValue + // The expected counter reset hint for each chunk. + expectedChunks []expectedChunk + }{ + "counter reset in-order cleared by in-memory OOO chunk": { + samples: []tsValue{ + {1, 40}, // New in In-order. I1. + {4, 30}, // In-order counter reset. I2. + {2, 40}, // New in OOO. O1. + {3, 10}, // OOO counter reset. O2. + }, + oooCap: 30, + // Expect all to be set to UnknownCounterReset because we switch between + // in-order and out-of-order samples. + expectedSamples: []expectedTsValue{ + {1, 40, histogram.UnknownCounterReset}, // I1. + {2, 40, histogram.UnknownCounterReset}, // O1. + {3, 10, histogram.UnknownCounterReset}, // O2. + {4, 30, histogram.UnknownCounterReset}, // I2. Counter reset cleared by iterator change. + }, + expectedChunks: []expectedChunk{ + {histogram.UnknownCounterReset, 1}, // I1. + {histogram.UnknownCounterReset, 1}, // O1. 
+ {histogram.UnknownCounterReset, 1}, // O2. + {histogram.UnknownCounterReset, 1}, // I2. + }, + }, + "counter reset in OOO mmapped chunk cleared by in-memory ooo chunk": { + samples: []tsValue{ + {8, 30}, // In-order, new chunk. I1. + {1, 10}, // OOO, new chunk (will be mmapped). MO1. + {2, 20}, // OOO, no reset (will be mmapped). MO1. + {3, 30}, // OOO, no reset (will be mmapped). MO1. + {5, 20}, // OOO, reset (will be mmapped). MO2. + {6, 10}, // OOO, reset (will be mmapped). MO3. + {7, 20}, // OOO, no reset (will be mmapped). MO3. + {4, 10}, // OOO, inserted into memory, triggers mmap. O1. + }, + oooCap: 6, + expectedSamples: []expectedTsValue{ + {1, 10, histogram.UnknownCounterReset}, // MO1. + {2, 20, histogram.NotCounterReset}, // MO1. + {3, 30, histogram.NotCounterReset}, // MO1. + {4, 10, histogram.UnknownCounterReset}, // O1. Counter reset cleared by iterator change. + {5, 20, histogram.UnknownCounterReset}, // MO2. + {6, 10, histogram.UnknownCounterReset}, // MO3. + {7, 20, histogram.NotCounterReset}, // MO3. + {8, 30, histogram.UnknownCounterReset}, // I1. + }, + expectedChunks: []expectedChunk{ + {histogram.UnknownCounterReset, 3}, // MO1. + {histogram.UnknownCounterReset, 1}, // O1. + {histogram.UnknownCounterReset, 1}, // MO2. + {histogram.UnknownCounterReset, 2}, // MO3. + {histogram.UnknownCounterReset, 1}, // I1. + }, + }, + "counter reset in OOO mmapped chunk cleared by another OOO mmapped chunk": { + samples: []tsValue{ + {8, 100}, // In-order, new chunk. I1. + {1, 50}, // OOO, new chunk (will be mmapped). MO1. + {5, 40}, // OOO, reset (will be mmapped). MO2. + {6, 50}, // OOO, no reset (will be mmapped). MO2. + {2, 10}, // OOO, new chunk no reset (will be mmapped). MO3. + {3, 20}, // OOO, no reset (will be mmapped). MO3. + {4, 30}, // OOO, no reset (will be mmapped). MO3. + {7, 60}, // OOO, no reset in memory. O1. + }, + oooCap: 3, + expectedSamples: []expectedTsValue{ + {1, 50, histogram.UnknownCounterReset}, // MO1. 
+ {2, 10, histogram.UnknownCounterReset}, // MO3. + {3, 20, histogram.NotCounterReset}, // MO3. + {4, 30, histogram.NotCounterReset}, // MO3. + {5, 40, histogram.UnknownCounterReset}, // MO2. + {6, 50, histogram.NotCounterReset}, // MO2. + {7, 60, histogram.UnknownCounterReset}, // O1. + {8, 100, histogram.UnknownCounterReset}, // I1. + }, + expectedChunks: []expectedChunk{ + {histogram.UnknownCounterReset, 1}, // MO1. + {histogram.UnknownCounterReset, 3}, // MO3. + {histogram.UnknownCounterReset, 2}, // MO2. + {histogram.UnknownCounterReset, 1}, // O1. + {histogram.UnknownCounterReset, 1}, // I1. + }, + }, + } + + for tcName, tc := range cases { + t.Run(tcName, func(t *testing.T) { + opts := DefaultOptions() + opts.OutOfOrderCapMax = tc.oooCap + opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() + + db := openTestDB(t, opts, nil) + db.DisableCompactions() + db.EnableOOONativeHistograms() + defer func() { + require.NoError(t, db.Close()) + }() + + app := db.Appender(context.Background()) + for _, s := range tc.samples { + require.NoError(t, appendFunc(app, s.ts, s.v)) + } + require.NoError(t, app.Commit()) + + t.Run("querier", func(t *testing.T) { + querier, err := db.Querier(0, 10) + require.NoError(t, err) + defer querier.Close() + + seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) + require.Len(t, seriesSet, 1) + samples, ok := seriesSet["{foo=\"bar1\"}"] + require.True(t, ok) + require.Len(t, samples, len(tc.samples)) + require.Len(t, samples, len(tc.expectedSamples)) + + // We expect all unknown counter resets because we clear the counter reset + // hint when we switch between in-order and out-of-order samples. 
+ for i, s := range samples { + switch name { + case intHistogram: + require.Equal(t, tc.expectedSamples[i].hint, s.H().CounterResetHint, "sample %d", i) + require.Equal(t, tc.expectedSamples[i].v, int64(s.H().Count), "sample %d", i) + case floatHistogram: + require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i) + require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i) + default: + t.Fatalf("unexpected sample type %s", name) + } + } + }) + + t.Run("chunk-querier", func(t *testing.T) { + querier, err := db.ChunkQuerier(0, 10) + require.NoError(t, err) + defer querier.Close() + + chunkSet := queryAndExpandChunks(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) + require.Len(t, chunkSet, 1) + chunks, ok := chunkSet["{foo=\"bar1\"}"] + require.True(t, ok) + require.Len(t, chunks, len(tc.expectedChunks)) + idx := 0 + for i, samples := range chunks { + require.Len(t, samples, tc.expectedChunks[i].size) + for j, s := range samples { + expectHint := tc.expectedChunks[i].hint + if j > 0 { + expectHint = histogram.NotCounterReset + } + switch name { + case intHistogram: + require.Equal(t, expectHint, s.H().CounterResetHint, "sample %d", idx) + require.Equal(t, tc.expectedSamples[idx].v, int64(s.H().Count), "sample %d", idx) + case floatHistogram: + require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx) + require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx) + default: + t.Fatalf("unexpected sample type %s", name) + } + idx++ + } + } + }) + }) + } +} + func TestOOOAppendAndQuery(t *testing.T) { for name, scenario := range sampleTypeScenarios { t.Run(name, func(t *testing.T) { @@ -6746,7 +6982,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset) // The samples with timestamp less than 410 overlap with the samples from chunk 2, so before compaction, // they're all 
UnknownCounterReset. Samples greater than or equal to 410 don't overlap with other chunks - // so they're always detected as NotCounterReset pre and post compaction/ + // so they're always detected as NotCounterReset pre and post compaction. if i >= 410 { s = copyWithCounterReset(s, histogram.NotCounterReset) } @@ -6772,8 +7008,10 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { s = addSample(165, series1, 100000, histogram.UnknownCounterReset) // Before compaction, sample has an UnknownCounterReset header due to the chainSampleIterator. series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) - // After compaction, the sample's counter reset is properly detected. - series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, copyWithCounterReset(s, histogram.CounterReset)) + // After compaction, the sample's counter reset is still UnknownCounterReset as we cannot trust CounterReset + // headers in chunks at the moment, so when reading the first sample in a chunk, its hint is set to + // UnknownCounterReset. + series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) // Add 23 more samples to complete a chunk. for i := 175; i < 405; i += 10 { @@ -6810,7 +7048,6 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { } // Counter reset. s = addSample(int64(490), series1, 100000, histogram.UnknownCounterReset) - s = copyWithCounterReset(s, histogram.CounterReset) series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s) series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s) // Add some more samples after the counter reset. @@ -6835,7 +7072,6 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { } // Counter reset. 
s = addSample(int64(300), series2, 100000, histogram.UnknownCounterReset) - s = copyWithCounterReset(s, histogram.CounterReset) series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s) series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s) // Add some more samples after the counter reset. @@ -7007,6 +7243,103 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { } } +func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing.T) { + for _, floatHistogram := range []bool{false, true} { + dir := t.TempDir() + ctx := context.Background() + + opts := DefaultOptions() + opts.OutOfOrderCapMax = 30 + opts.OutOfOrderTimeWindow = 500 * time.Minute.Milliseconds() + + db, err := Open(dir, nil, nil, opts, nil) + require.NoError(t, err) + db.DisableCompactions() // We want to manually call it. + db.EnableNativeHistograms() + db.EnableOOONativeHistograms() + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + series1 := labels.FromStrings("foo", "bar1") + + addSample := func(ts int64, l labels.Labels, val int) sample { + app := db.Appender(context.Background()) + tsMs := ts + if floatHistogram { + h := tsdbutil.GenerateTestFloatHistogram(val) + _, err = app.AppendHistogram(0, l, tsMs, nil, h) + require.NoError(t, err) + require.NoError(t, app.Commit()) + return sample{t: tsMs, fh: h.Copy()} + } + + h := tsdbutil.GenerateTestHistogram(val) + _, err = app.AppendHistogram(0, l, tsMs, h, nil) + require.NoError(t, err) + require.NoError(t, app.Commit()) + return sample{t: tsMs, h: h.Copy()} + } + + var expSamples []chunks.Sample + + s := addSample(0, series1, 0) + expSamples = append(expSamples, s) + s = addSample(1, series1, 10) + expSamples = append(expSamples, copyWithCounterReset(s, histogram.NotCounterReset)) + s = addSample(3, series1, 3) + expSamples = append(expSamples, copyWithCounterReset(s, histogram.UnknownCounterReset)) + s = addSample(2, series1, 0) + expSamples = append(expSamples, 
copyWithCounterReset(s, histogram.UnknownCounterReset)) + + // Sort samples (as OOO samples not added in time-order). + sort.Slice(expSamples, func(i, j int) bool { + return expSamples[i].T() < expSamples[j].T() + }) + + verifyDBSamples := func(s1Samples []chunks.Sample) { + t.Helper() + expRes := map[string][]chunks.Sample{ + series1.String(): s1Samples, + } + + q, err := db.Querier(math.MinInt64, math.MaxInt64) + require.NoError(t, err) + actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) + requireEqualSeries(t, expRes, actRes, false) + } + + // Verify DB samples before compaction. + verifyDBSamples(expSamples) + + require.NoError(t, db.CompactOOOHead(ctx)) + + // Check samples after OOO compaction. + verifyDBSamples(expSamples) + + // Checking for expected data in the blocks. + // Check that blocks are created after compaction. + require.Len(t, db.Blocks(), 1) + + // Compact the in-order head and expect another block. + // Since this is a forced compaction, this block is not aligned with 2h. + err = db.CompactHead(NewRangeHead(db.head, 0, 3)) + require.NoError(t, err) + require.Len(t, db.Blocks(), 2) + + // Blocks created out of normal and OOO head now. But not merged. + verifyDBSamples(expSamples) + + // This will merge overlapping block. + require.NoError(t, db.Compact(ctx)) + + require.Len(t, db.Blocks(), 1) + + // Final state. Blocks from normal and OOO head are merged. 
+ verifyDBSamples(expSamples) + } +} + func copyWithCounterReset(s sample, hint histogram.CounterResetHint) sample { if s.h != nil { h := s.h.Copy() @@ -8045,7 +8378,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { }) ctx := context.Background() - appendHistogram := func( + appendHistogram := func(t *testing.T, lbls labels.Labels, tsMinute int, h *histogram.Histogram, exp *[]chunks.Sample, expCRH histogram.CounterResetHint, ) { @@ -8066,7 +8399,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { require.NoError(t, err) require.NoError(t, app.Commit()) } - appendFloat := func(lbls labels.Labels, tsMinute int, val float64, exp *[]chunks.Sample) { + appendFloat := func(t *testing.T, lbls labels.Labels, tsMinute int, val float64, exp *[]chunks.Sample) { t.Helper() app := db.Appender(ctx) _, err := app.Append(0, lbls, minute(tsMinute), val) @@ -8075,7 +8408,7 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { *exp = append(*exp, sample{t: minute(tsMinute), f: val}) } - testQuery := func(name, value string, exp map[string][]chunks.Sample) { + testQuery := func(t *testing.T, name, value string, exp map[string][]chunks.Sample) { t.Helper() q, err := db.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) @@ -8113,24 +8446,24 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { t.Run("series with only histograms", func(t *testing.T) { h := baseH.Copy() // This is shared across all sub tests. 
- appendHistogram(series1, 100, h, &exp1, histogram.UnknownCounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 100, h, &exp1, histogram.UnknownCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) h.PositiveBuckets[0]++ h.NegativeBuckets[0] += 2 h.Count += 10 - appendHistogram(series1, 101, h, &exp1, histogram.NotCounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 101, h, &exp1, histogram.NotCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) t.Run("changing schema", func(t *testing.T) { h.Schema = 2 - appendHistogram(series1, 102, h, &exp1, histogram.UnknownCounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 102, h, &exp1, histogram.UnknownCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) // Schema back to old. 
h.Schema = 1 - appendHistogram(series1, 103, h, &exp1, histogram.UnknownCounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 103, h, &exp1, histogram.UnknownCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) t.Run("new buckets incoming", func(t *testing.T) { @@ -8158,8 +8491,8 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { h.PositiveSpans[1].Length++ h.PositiveBuckets = append(h.PositiveBuckets, 1) h.Count += 3 - appendHistogram(series1, 104, h, &exp1, histogram.NotCounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 104, h, &exp1, histogram.NotCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) // Because of the previous two histograms being on the active chunk, // and the next append is only adding a new bucket, the active chunk @@ -8196,14 +8529,14 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { h.Count += 3 // {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1} h.PositiveBuckets = append(h.PositiveBuckets[:2], append([]int64{0}, h.PositiveBuckets[2:]...)...) - appendHistogram(series1, 105, h, &exp1, histogram.NotCounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 105, h, &exp1, histogram.NotCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) // We add 4 more histograms to clear out the buffer and see the re-encoded histograms. 
- appendHistogram(series1, 106, h, &exp1, histogram.NotCounterReset) - appendHistogram(series1, 107, h, &exp1, histogram.NotCounterReset) - appendHistogram(series1, 108, h, &exp1, histogram.NotCounterReset) - appendHistogram(series1, 109, h, &exp1, histogram.NotCounterReset) + appendHistogram(t, series1, 106, h, &exp1, histogram.NotCounterReset) + appendHistogram(t, series1, 107, h, &exp1, histogram.NotCounterReset) + appendHistogram(t, series1, 108, h, &exp1, histogram.NotCounterReset) + appendHistogram(t, series1, 109, h, &exp1, histogram.NotCounterReset) // Update the expected histograms to reflect the re-encoding. if floatHistogram { @@ -8230,69 +8563,69 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { exp1[l-6] = sample{t: exp1[l-6].T(), h: h6} } - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) t.Run("buckets disappearing", func(t *testing.T) { h.PositiveSpans[1].Length-- h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1] h.Count -= 3 - appendHistogram(series1, 110, h, &exp1, histogram.CounterReset) - testQuery("foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) + appendHistogram(t, series1, 110, h, &exp1, histogram.UnknownCounterReset) + testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1}) }) }) t.Run("series starting with float and then getting histograms", func(t *testing.T) { - appendFloat(series2, 100, 100, &exp2) - appendFloat(series2, 101, 101, &exp2) - appendFloat(series2, 102, 102, &exp2) - testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) + appendFloat(t, series2, 100, 100, &exp2) + appendFloat(t, series2, 101, 101, &exp2) + appendFloat(t, series2, 102, 102, &exp2) + testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) h := baseH.Copy() - appendHistogram(series2, 103, h, &exp2, 
histogram.UnknownCounterReset) - appendHistogram(series2, 104, h, &exp2, histogram.NotCounterReset) - appendHistogram(series2, 105, h, &exp2, histogram.NotCounterReset) - testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) + appendHistogram(t, series2, 103, h, &exp2, histogram.UnknownCounterReset) + appendHistogram(t, series2, 104, h, &exp2, histogram.NotCounterReset) + appendHistogram(t, series2, 105, h, &exp2, histogram.NotCounterReset) + testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) // Switching between float and histograms again. - appendFloat(series2, 106, 106, &exp2) - appendFloat(series2, 107, 107, &exp2) - testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) + appendFloat(t, series2, 106, 106, &exp2) + appendFloat(t, series2, 107, 107, &exp2) + testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) - appendHistogram(series2, 108, h, &exp2, histogram.UnknownCounterReset) - appendHistogram(series2, 109, h, &exp2, histogram.NotCounterReset) - testQuery("foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) + appendHistogram(t, series2, 108, h, &exp2, histogram.UnknownCounterReset) + appendHistogram(t, series2, 109, h, &exp2, histogram.NotCounterReset) + testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2}) }) t.Run("series starting with histogram and then getting float", func(t *testing.T) { h := baseH.Copy() - appendHistogram(series3, 101, h, &exp3, histogram.UnknownCounterReset) - appendHistogram(series3, 102, h, &exp3, histogram.NotCounterReset) - appendHistogram(series3, 103, h, &exp3, histogram.NotCounterReset) - testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) + appendHistogram(t, series3, 101, h, &exp3, histogram.UnknownCounterReset) + appendHistogram(t, series3, 102, h, &exp3, histogram.NotCounterReset) + appendHistogram(t, series3, 103, h, &exp3, histogram.NotCounterReset) + 
testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) - appendFloat(series3, 104, 100, &exp3) - appendFloat(series3, 105, 101, &exp3) - appendFloat(series3, 106, 102, &exp3) - testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) + appendFloat(t, series3, 104, 100, &exp3) + appendFloat(t, series3, 105, 101, &exp3) + appendFloat(t, series3, 106, 102, &exp3) + testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) // Switching between histogram and float again. - appendHistogram(series3, 107, h, &exp3, histogram.UnknownCounterReset) - appendHistogram(series3, 108, h, &exp3, histogram.NotCounterReset) - testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) + appendHistogram(t, series3, 107, h, &exp3, histogram.UnknownCounterReset) + appendHistogram(t, series3, 108, h, &exp3, histogram.NotCounterReset) + testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) - appendFloat(series3, 109, 106, &exp3) - appendFloat(series3, 110, 107, &exp3) - testQuery("foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) + appendFloat(t, series3, 109, 106, &exp3) + appendFloat(t, series3, 110, 107, &exp3) + testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3}) }) t.Run("query mix of histogram and float series", func(t *testing.T) { // A float only series. 
- appendFloat(series4, 100, 100, &exp4) - appendFloat(series4, 101, 101, &exp4) - appendFloat(series4, 102, 102, &exp4) + appendFloat(t, series4, 100, 100, &exp4) + appendFloat(t, series4, 101, 101, &exp4) + appendFloat(t, series4, 102, 102, &exp4) - testQuery("foo", "bar.*", map[string][]chunks.Sample{ + testQuery(t, "foo", "bar.*", map[string][]chunks.Sample{ series1.String(): exp1, series2.String(): exp2, series3.String(): exp3, diff --git a/tsdb/head_test.go b/tsdb/head_test.go index cc9daa97fe..2ca3aeffc7 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6405,7 +6405,7 @@ func TestHeadAppender_AppendCT(t *testing.T) { expectedSamples: []chunks.Sample{ sample{t: 1, h: &histogram.Histogram{}}, sample{t: 100, h: testHistogram}, - sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.UnknownCounterReset}}, sample{t: 102, h: testHistogram}, }, }, @@ -6418,7 +6418,7 @@ func TestHeadAppender_AppendCT(t *testing.T) { expectedSamples: []chunks.Sample{ sample{t: 1, fh: &histogram.FloatHistogram{}}, sample{t: 100, fh: testFloatHistogram}, - sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}}, + sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.UnknownCounterReset}}, sample{t: 102, fh: testFloatHistogram}, }, }, diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index b1641e29ba..37a46e76da 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -166,6 +166,8 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) { h2 := h1.Copy() h2.PositiveSpans = append(h2.PositiveSpans, histogram.Span{Offset: 1, Length: 1}) h2.PositiveBuckets = append(h2.PositiveBuckets, 12) + h2explicit := h2.Copy() + h2explicit.CounterResetHint = histogram.CounterReset testCases := map[string]struct { samples []sample @@ -198,12 +200,32 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) { {encoding: chunkenc.EncXOR, 
minTime: 1200, maxTime: 1200}, }, }, - "has a counter reset": { + "has an implicit counter reset": { samples: []sample{ {t: 1000, h: h2}, {t: 1100, h: h1}, }, - expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.CounterReset}, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000}, + {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, + }, + }, + "has an explicit counter reset": { + samples: []sample{ + {t: 1100, h: h2explicit}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, + }, + }, + "has an explicit counter reset inside": { + samples: []sample{ + {t: 1000, h: h1}, + {t: 1100, h: h2explicit}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset}, expectedChunks: []chunkVerify{ {encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000}, {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, diff --git a/tsdb/testutil.go b/tsdb/testutil.go index 03587f4e2c..c39eb133c7 100644 --- a/tsdb/testutil.go +++ b/tsdb/testutil.go @@ -111,7 +111,11 @@ func requireEqualSeries(t *testing.T, expected, actual map[string][]chunks.Sampl for name, expectedItem := range expected { actualItem, ok := actual[name] require.True(t, ok, "Expected series %s not found", name) - requireEqualSamples(t, name, expectedItem, actualItem, requireEqualSamplesIgnoreCounterResets) + if ignoreCounterResets { + requireEqualSamples(t, name, expectedItem, actualItem, requireEqualSamplesIgnoreCounterResets) + } else { + requireEqualSamples(t, name, expectedItem, actualItem) + } } for name := range actual { _, ok := expected[name] From 
789c9b1a5e455850ed9b3c89cafb37df75ce1e50 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Tue, 12 Nov 2024 20:07:05 +0530 Subject: [PATCH 0928/1397] [BUGFIX] PromQL: Corrects the behaviour of some operator and aggregators with Native Histograms (#15245) PromQL: Correct the behaviour of some operator and aggregators with Native Histograms --------- Signed-off-by: Neeraj Gartia --- promql/engine.go | 172 +++++++++++------- promql/promqltest/testdata/aggregators.test | 35 +++- .../testdata/native_histograms.test | 2 + promql/promqltest/testdata/operators.test | 21 +++ util/annotations/annotations.go | 10 + 5 files changed, 173 insertions(+), 67 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index cc7ff694e4..5e0539d8f7 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2793,77 +2793,84 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { // vectorElemBinop evaluates a binary operation between two Vector elements. 
func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) { - switch op { - case parser.ADD: - if hlhs != nil && hrhs != nil { - res, err := hlhs.Copy().Add(hrhs) - if err != nil { - return 0, nil, false, err + opName := parser.ItemTypeStr[op] + switch { + case hlhs == nil && hrhs == nil: + { + switch op { + case parser.ADD: + return lhs + rhs, nil, true, nil + case parser.SUB: + return lhs - rhs, nil, true, nil + case parser.MUL: + return lhs * rhs, nil, true, nil + case parser.DIV: + return lhs / rhs, nil, true, nil + case parser.POW: + return math.Pow(lhs, rhs), nil, true, nil + case parser.MOD: + return math.Mod(lhs, rhs), nil, true, nil + case parser.EQLC: + return lhs, nil, lhs == rhs, nil + case parser.NEQ: + return lhs, nil, lhs != rhs, nil + case parser.GTR: + return lhs, nil, lhs > rhs, nil + case parser.LSS: + return lhs, nil, lhs < rhs, nil + case parser.GTE: + return lhs, nil, lhs >= rhs, nil + case parser.LTE: + return lhs, nil, lhs <= rhs, nil + case parser.ATAN2: + return math.Atan2(lhs, rhs), nil, true, nil } - return 0, res.Compact(0), true, nil } - if hlhs == nil && hrhs == nil { - return lhs + rhs, nil, true, nil - } - if hlhs != nil { - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "+", "float", pos) - } - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "+", "histogram", pos) - case parser.SUB: - if hlhs != nil && hrhs != nil { - res, err := hlhs.Copy().Sub(hrhs) - if err != nil { - return 0, nil, false, err + case hlhs == nil && hrhs != nil: + { + switch op { + case parser.MUL: + return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil + case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", opName, 
"histogram", pos) } - return 0, res.Compact(0), true, nil } - if hlhs == nil && hrhs == nil { - return lhs - rhs, nil, true, nil - } - if hlhs != nil { - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "-", "float", pos) - } - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "-", "histogram", pos) - case parser.MUL: - if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true, nil - } - if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true, nil - } - if hlhs != nil && hrhs != nil { - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "*", "histogram", pos) - } - return lhs * rhs, nil, true, nil - case parser.DIV: - if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true, nil - } - if hrhs != nil { - if hlhs != nil { - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "/", "histogram", pos) + case hlhs != nil && hrhs == nil: + { + switch op { + case parser.MUL: + return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil + case parser.DIV: + return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil + case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "float", pos) + } + } + case hlhs != nil && hrhs != nil: + { + switch op { + case parser.ADD: + res, err := hlhs.Copy().Add(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil + case parser.SUB: + res, err := hlhs.Copy().Sub(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil + case parser.EQLC: + // This operation expects that both histograms are compacted. + return 0, hlhs, hlhs.Equals(hrhs), nil + case parser.NEQ: + // This operation expects that both histograms are compacted. 
+ return 0, hlhs, !hlhs.Equals(hrhs), nil + case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "histogram", pos) } - return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "/", "histogram", pos) } - return lhs / rhs, nil, true, nil - case parser.POW: - return math.Pow(lhs, rhs), nil, true, nil - case parser.MOD: - return math.Mod(lhs, rhs), nil, true, nil - case parser.EQLC: - return lhs, nil, lhs == rhs, nil - case parser.NEQ: - return lhs, nil, lhs != rhs, nil - case parser.GTR: - return lhs, nil, lhs > rhs, nil - case parser.LSS: - return lhs, nil, lhs < rhs, nil - case parser.GTE: - return lhs, nil, lhs >= rhs, nil - case parser.LTE: - return lhs, nil, lhs <= rhs, nil - case parser.ATAN2: - return math.Atan2(lhs, rhs), nil, true, nil } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } @@ -2925,16 +2932,34 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix case h != nil: // Ignore histograms for STDVAR and STDDEV. 
group.seen = false + if op == parser.STDVAR { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange())) + } case math.IsNaN(f), math.IsInf(f, 0): group.floatValue = math.NaN() default: group.floatValue = 0 } case parser.QUANTILE: + if h != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange())) + } group.heap = make(vectorByValueHeap, 1) group.heap[0] = Sample{F: f} case parser.GROUP: group.floatValue = 1 + case parser.MIN, parser.MAX: + if h != nil { + group.seen = false + if op == parser.MIN { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + } + } } continue } @@ -3033,11 +3058,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Do nothing. Required to avoid the panic in `default:` below. 
case parser.MAX: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + continue + } if group.floatValue < f || math.IsNaN(group.floatValue) { group.floatValue = f } case parser.MIN: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + continue + } if group.floatValue > f || math.IsNaN(group.floatValue) { group.floatValue = f } @@ -3051,9 +3084,18 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix delta := f - group.floatMean group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) + if op == parser.STDVAR { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange())) + } } case parser.QUANTILE: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange())) + continue + } group.heap = append(group.heap, Sample{F: f}) default: diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index e2eb381dbc..19a896a6fb 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -229,13 +229,28 @@ load 5m http_requests{job="api-server", instance="0", group="canary"} NaN http_requests{job="api-server", instance="1", group="canary"} 3 http_requests{job="api-server", instance="2", group="canary"} 4 + http_requests_histogram{job="api-server", instance="3", group="canary"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} eval instant at 0m max(http_requests) {} 4 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m max({job="api-server"}) + {} 4 + +# The histogram is ignored here so there is no result but it has an info annotation now. 
+eval_info instant at 0m max(http_requests_histogram) + eval instant at 0m min(http_requests) {} 1 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m min({job="api-server"}) + {} 1 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 0m min(http_requests_histogram) + eval instant at 0m max by (group) (http_requests) {group="production"} 2 {group="canary"} 4 @@ -380,6 +395,7 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} foo .8 eval instant at 1m quantile without(point)(0.8, data) @@ -387,6 +403,15 @@ eval instant at 1m quantile without(point)(0.8, data) {test="three samples"} 1.6 {test="uneven samples"} 2.8 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) + {test="two samples"} 0.8 + {test="three samples"} 1.6 + {test="uneven samples"} 2.8 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 1m quantile(0.8, data_histogram) + # Bug #5276. eval instant at 1m quantile without(point)(scalar(foo), data) {test="two samples"} 0.8 @@ -581,12 +606,18 @@ load 5m series{label="b"} 2 series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} -eval instant at 0m stddev(series) +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m stddev(series) {} 0.5 -eval instant at 0m stdvar(series) +eval_info instant at 0m stdvar(series) {} 0.25 +# The histogram is ignored here so there is no result but it has an info annotation now. 
+eval_info instant at 0m stddev({label="c"}) + +eval_info instant at 0m stdvar({label="c"}) + eval instant at 0m stddev by (label) (series) {label="a"} 0 {label="b"} 0 diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 8c5814ae8a..0463384e2e 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1184,3 +1184,5 @@ eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m]) eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} + +clear diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test index 645cca88b8..9070953abd 100644 --- a/promql/promqltest/testdata/operators.test +++ b/promql/promqltest/testdata/operators.test @@ -7,6 +7,7 @@ load 5m http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}x11 load 5m vector_matching_a{l="x"} 0+1x100 @@ -287,6 +288,26 @@ eval instant at 50m 1 == bool 1 eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100 {job="api-server", instance="0", group="production"} 1 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. 
+eval_info instant at 5m {job="app-server"} == 80 + http_requests{group="canary", instance="1", job="app-server"} 80 + +eval_info instant at 5m http_requests_histogram != 80 + +eval_info instant at 5m http_requests_histogram > 80 + +eval_info instant at 5m http_requests_histogram < 80 + +eval_info instant at 5m http_requests_histogram >= 80 + +eval_info instant at 5m http_requests_histogram <= 80 + +# Should produce valid results in case of (in)equality between two histograms. +eval instant at 5m http_requests_histogram == http_requests_histogram + http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +eval instant at 5m http_requests_histogram != http_requests_histogram + # group_left/group_right. clear diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index ebe74ecd11..1b743f7057 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -147,6 +147,7 @@ var ( PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) + HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) ) type annoErr struct { @@ -283,3 +284,12 @@ func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posr Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType), } } + +// NewHistogramIgnoredInAggregationInfo is used when a histogram is ignored by +// an aggregation operator that cannot handle histograms. 
+func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation), + } +} From 48abdcd83f64eaa5436aa4eadf089086bf29b05d Mon Sep 17 00:00:00 2001 From: Tristan Colgate-McFarlane Date: Fri, 1 Nov 2024 16:24:31 +0000 Subject: [PATCH 0929/1397] bugfix: allow quoted exemplar keys in openmetrics text format Signed-off-by: Tristan Colgate-McFarlane --- model/textparse/openmetricslex.l | 1 + model/textparse/openmetricslex.l.go | 150 ++++++++++++++--------- model/textparse/openmetricsparse_test.go | 21 +++- 3 files changed, 112 insertions(+), 60 deletions(-) diff --git a/model/textparse/openmetricslex.l b/model/textparse/openmetricslex.l index 9afbbbd8bd..09106c52ce 100644 --- a/model/textparse/openmetricslex.l +++ b/model/textparse/openmetricslex.l @@ -69,6 +69,7 @@ S [ ] {S}#{S}\{ l.state = sExemplar; return tComment {L}({L}|{D})* return tLName +\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tQString \} l.state = sEValue; return tBraceClose = l.state = sEValue; return tEqual \"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue diff --git a/model/textparse/openmetricslex.l.go b/model/textparse/openmetricslex.l.go index c8789ef60d..c0b2fcdb4d 100644 --- a/model/textparse/openmetricslex.l.go +++ b/model/textparse/openmetricslex.l.go @@ -53,9 +53,9 @@ yystate0: case 8: // start condition: sExemplar goto yystart57 case 9: // start condition: sEValue - goto yystart62 + goto yystart65 case 10: // start condition: sETimestamp - goto yystart68 + goto yystart71 } yystate1: @@ -538,125 +538,153 @@ yystart57: switch { default: goto yyabort - case c == ',': + case c == '"': goto yystate58 - case c == '=': - goto yystate59 - case c == '}': + case c == ',': goto yystate61 + case c == '=': + goto yystate62 + case c == '}': + goto yystate64 case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate60 + goto 
yystate63 } yystate58: c = l.next() - goto yyrule26 + switch { + default: + goto yyabort + case c == '"': + goto yystate59 + case c == '\\': + goto yystate60 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate58 + } yystate59: c = l.next() - goto yyrule24 + goto yyrule23 yystate60: c = l.next() switch { default: - goto yyrule22 - case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': - goto yystate60 + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate58 } yystate61: c = l.next() - goto yyrule23 + goto yyrule27 yystate62: c = l.next() -yystart62: - switch { - default: - goto yyabort - case c == ' ': - goto yystate63 - case c == '"': - goto yystate65 - } + goto yyrule25 yystate63: c = l.next() switch { default: - goto yyabort - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 + goto yyrule22 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate63 } yystate64: c = l.next() - switch { - default: - goto yyrule27 - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate64 - } + goto yyrule24 yystate65: c = l.next() +yystart65: switch { default: goto yyabort - case c == '"': + case c == ' ': goto yystate66 - case c == '\\': - goto yystate67 - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': - goto yystate65 + case c == '"': + goto yystate68 } yystate66: c = l.next() - goto yyrule25 + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate67 + } yystate67: c = l.next() switch { default: - goto yyabort - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': - goto yystate65 + goto yyrule28 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': + goto yystate67 } yystate68: c = l.next() -yystart68: switch { default: goto yyabort - case c == ' ': - goto yystate70 - case c == '\n': + case c == '"': goto yystate69 + case c == '\\': + goto yystate70 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate68 } yystate69: c = l.next() - goto yyrule29 + goto yyrule26 yystate70: c = l.next() switch { default: goto yyabort - case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate71 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate68 } yystate71: c = l.next() +yystart71: switch { default: - goto yyrule28 + goto yyabort + case c == ' ': + goto yystate73 + case c == '\n': + goto yystate72 + } + +yystate72: + c = l.next() + goto yyrule30 + +yystate73: + c = l.next() + switch { + default: + goto yyabort case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': - goto yystate71 + goto yystate74 + } + +yystate74: + c = l.next() + switch { + default: + goto yyrule29 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': + goto yystate74 } yyrule1: // #{S} @@ -782,39 +810,45 @@ yyrule22: // {L}({L}|{D})* { return tLName } -yyrule23: // \} +yyrule23: // \"(\\.|[^\\"\n])*\" + { + l.state = sExemplar + return tQString + goto yystate0 + } +yyrule24: // \} { l.state = sEValue return tBraceClose goto yystate0 } -yyrule24: // = +yyrule25: // = { l.state = sEValue return tEqual goto yystate0 } -yyrule25: // \"(\\.|[^\\"\n])*\" +yyrule26: // \"(\\.|[^\\"\n])*\" { l.state = sExemplar return tLValue goto yystate0 } -yyrule26: // , +yyrule27: // , { return tComma } -yyrule27: // {S}[^ \n]+ +yyrule28: // {S}[^ \n]+ { l.state = sETimestamp return tValue goto yystate0 } -yyrule28: // {S}[^ \n]+ +yyrule29: // {S}[^ \n]+ { return tTimestamp } -yyrule29: // \n +yyrule30: // \n if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak @@ -859,10 +893,10 @@ yyabort: // no lexem recognized goto yystate57 } if false { - goto yystate62 + goto yystate65 } if false { - goto yystate68 + goto yystate71 } } diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index 9c3c679ab5..a09c56a7ba 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -486,9 +486,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) { {"http.status",q="0.9",a="b"} 8.3835e-05 {q="0.9","http.status",a="b"} 8.3835e-05 {"go.gc_duration_seconds_sum"} 0.004304266 -{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0` +{"Heizölrückstoßabdämpfung 10€ metric with \"interesting\" {character\nchoices}","strange©™\n'quoted' \"name\""="6"} 10.0 +quotedexemplar_count 1 # {"id.thing"="histogram-count-test"} 4 +quotedexemplar2_count 1 # {"id.thing"="histogram-count-test",other="hello"} 4 +` - input += "\n# EOF\n" + input += "# EOF\n" exp := []parsedEntry{ { @@ -535,6 +538,20 @@ func TestUTF8OpenMetricsParse(t *testing.T) { v: 10.0, 
lset: labels.FromStrings("__name__", `Heizölrückstoßabdämpfung 10€ metric with "interesting" {character choices}`, "strange©™\n'quoted' \"name\"", "6"), + }, { + m: `quotedexemplar_count`, + v: 1, + lset: labels.FromStrings("__name__", "quotedexemplar_count"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id.thing", "histogram-count-test"), Value: 4}, + }, + }, { + m: `quotedexemplar2_count`, + v: 1, + lset: labels.FromStrings("__name__", "quotedexemplar2_count"), + es: []exemplar.Exemplar{ + {Labels: labels.FromStrings("id.thing", "histogram-count-test", "other", "hello"), Value: 4}, + }, }, } From 84396bf636f04c72834b2145d05400f406b8208a Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Wed, 13 Nov 2024 13:29:56 +0100 Subject: [PATCH 0930/1397] migration: add removal of AM v1 api Signed-off-by: Jan Fajerski --- docs/migration.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/migration.md b/docs/migration.md index 869b67cc8d..73de5bcaaf 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -207,6 +207,12 @@ This should **only** be applied to metrics that currently produce such labels. regex: (\d+)\.0+;.*_bucket ``` +### Disallow configuring Alertmanager with the v1 API +Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3 +requires [Alertmanager 0.16.0](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0) or later. Users with older Alertmanager +versions or configurations that use `alerting: alertmanagers: [api_version: v1]` +need to upgrade Alertmanager and change their configuration to use `api_version: v2`. + # Prometheus 2.0 migration guide For the Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/). 
From d541b3bbebbc889e02ff51dd54e5a1674bcf5b77 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Wed, 13 Nov 2024 13:30:20 +0100 Subject: [PATCH 0931/1397] prepare release 3.0.0 Signed-off-by: Jan Fajerski --- CHANGELOG.md | 75 +++++++++----------- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 +- web/ui/module/codemirror-promql/package.json | 4 +- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 ++-- web/ui/package.json | 2 +- web/ui/react-app/package-lock.json | 4 +- web/ui/react-app/package.json | 2 +- 9 files changed, 49 insertions(+), 60 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9766854cd..fcfde9a988 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,63 +2,25 @@ ## unreleased -## 3.0.0-rc.1 / 2024-11-11 +## 3.0.0 / 2024-11-14 + +This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users that want to upgrade we recommend to read through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/). * [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376 * [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373 -* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384 -* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 -* [BUGFIX] UI: fix selector / series formatting for empty metric names. 
#15341 - -## 3.0.0-rc.0 / 2024-10-31 - * [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136 * [CHANGE] Remote-write: default enable_http2 to false. #15219 * [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164 * [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178 * [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657 * [CHANGE] Disallow configuring AM with the v1 api. #13883 -* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 -* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 -* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 -* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941 -* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 -* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251 - -## 3.0.0-beta.1 / 2024-10-09 - * [CHANGE] regexp `.` now matches all characters (performance improvement). #14505 * [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930 * [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894 * [CHANGE] Prometheus will not add or remove port numbers from the target address. `no-default-scrape-port` feature flag removed. 
#14160 * [CHANGE] Logging: the format of log lines has changed a little, along with the adoption of Go's Structured Logging package. #14906 -* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion' is enabled. #14738 +* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion` is enabled. #14738 * [CHANGE] Float literals and time durations being the same is now a stable fetaure. #15111 -* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096 -* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082 -* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677 -* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546 -* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909 -* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929 -* [ENHANCEMENT] Consul SD: Support catalog filters. #11224 -* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975 -* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932 -* [PERF] TSDB: Grow postings by doubling. #14721 -* [PERF] Relabeling: Optimize adding a constant label pair. #12180 -* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095 -* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910 -* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854 -* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884 -* [BUGFIX] PromQL: Unary negation of native histograms. #14821 -* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). 
#15025 -* [BUGFIX] Autoreload: Reload invalid yaml files. #14947 - -## 3.0.0-beta.0 / 2024-09-05 - -Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes. - -As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs. - * [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872 * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 @@ -69,8 +31,35 @@ As is traditional with a beta release, we do **not** recommend users install 3.0 * [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. 
#14640, #14643 -* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384 * [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769 +* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 +* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 +* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 +* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096 +* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082 +* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677 +* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546 +* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909 +* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929 +* [ENHANCEMENT] Consul SD: Support catalog filters. #11224 +* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875 +* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975 +* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932 +* [PERF] TSDB: Grow postings by doubling. #14721 +* [PERF] Relabeling: Optimize adding a constant label pair. #12180 +* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357 +* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341 +* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. 
#14941 +* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941 +* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251 +* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095 +* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910 +* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854 +* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884 +* [BUGFIX] PromQL: Unary negation of native histograms. #14821 +* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025 +* [BUGFIX] Autoreload: Reload invalid yaml files. #14947 * [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029 ## 2.55.0 / 2024-10-22 diff --git a/VERSION b/VERSION index 8443074c0f..4a36342fca 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0-rc.1 +3.0.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 49ee44a499..dc32eee96d 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.300.0-rc.1", + "version": "0.300.0", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0-rc.1", + "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 8e9be9b760..8fb6dc4ba7 100644 --- a/web/ui/module/codemirror-promql/package.json +++ 
b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0-rc.1", + "version": "0.300.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-rc.1", + "@prometheus-io/lezer-promql": "0.300.0", "lru-cache": "^11.0.1" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index ef3018964f..6564d3fa56 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0-rc.1", + "version": "0.300.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index feee0d6a31..f5ae7642b0 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.300.0-rc.1", + "version": "0.300.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.300.0-rc.1", + "version": "0.300.0", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.300.0-rc.1", + "version": "0.300.0", "dependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0-rc.1", + "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", @@ -155,10 +155,10 @@ 
}, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0-rc.1", + "version": "0.300.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0-rc.1", + "@prometheus-io/lezer-promql": "0.300.0", "lru-cache": "^11.0.1" }, "devDependencies": { @@ -188,7 +188,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0-rc.1", + "version": "0.300.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", diff --git a/web/ui/package.json b/web/ui/package.json index b9a6bc9ac9..ef5bdb81fc 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.300.0-rc.1", + "version": "0.300.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index ce18939428..bdc2d72bee 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.300.0-rc.1", + "version": "0.300.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.300.0-rc.1", + "version": "0.300.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c61358e1de..30db488ccc 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.300.0-rc.1", + "version": "0.300.0", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", From f7685caf0d2308164ca83d64f70730d7ea447aab Mon Sep 17 00:00:00 2001 From: Aniket Kaulavkar Date: Thu, 14 Nov 2024 10:41:47 +0530 Subject: [PATCH 0932/1397] Parallelize discovery/kubernetes tests using t.Parallel() 
Signed-off-by: Aniket Kaulavkar --- discovery/kubernetes/endpoints_test.go | 14 ++++++++++++++ .../kubernetes/endpointslice_adaptor_test.go | 1 + discovery/kubernetes/endpointslice_test.go | 15 +++++++++++++++ discovery/kubernetes/ingress_test.go | 5 +++++ discovery/kubernetes/kubernetes_test.go | 3 +++ discovery/kubernetes/node_test.go | 4 ++++ discovery/kubernetes/pod_test.go | 10 ++++++++++ discovery/kubernetes/service_test.go | 6 ++++++ 8 files changed, 58 insertions(+) diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index a1ac6e5d48..28ad5697bc 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -97,6 +97,7 @@ func makeEndpoints() *v1.Endpoints { } func TestEndpointsDiscoveryBeforeRun(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -151,6 +152,7 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) { } func TestEndpointsDiscoveryAdd(t *testing.T) { + t.Parallel() obj := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -276,6 +278,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) { } func TestEndpointsDiscoveryDelete(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -294,6 +297,7 @@ func TestEndpointsDiscoveryDelete(t *testing.T) { } func TestEndpointsDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -365,6 +369,7 @@ func TestEndpointsDiscoveryUpdate(t *testing.T) { } func TestEndpointsDiscoveryEmptySubsets(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -393,6 +398,7 @@ func TestEndpointsDiscoveryEmptySubsets(t *testing.T) { } func TestEndpointsDiscoveryWithService(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, 
makeEndpoints()) k8sDiscoveryTest{ @@ -458,6 +464,7 @@ func TestEndpointsDiscoveryWithService(t *testing.T) { } func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints()) k8sDiscoveryTest{ @@ -538,6 +545,7 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { } func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { + t.Parallel() metadataConfig := AttachMetadataConfig{Node: true} nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} @@ -611,6 +619,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { } func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { + t.Parallel() nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} node1 := makeNode("foobar", "", "", nodeLabels1, nil) @@ -688,6 +697,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { } func TestEndpointsDiscoveryNamespaces(t *testing.T) { + t.Parallel() epOne := makeEndpoints() epOne.Namespace = "ns1" objs := []runtime.Object{ @@ -839,6 +849,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { } func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() epOne := makeEndpoints() epOne.Namespace = "own-ns" @@ -933,6 +944,7 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { } func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { + t.Parallel() ep := makeEndpoints() ep.Namespace = "ns" @@ -978,6 +990,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { // TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. // See https://github.com/prometheus/prometheus/issues/11305 for more details. 
func TestEndpointsDiscoveryUpdatePod(t *testing.T) { + t.Parallel() pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -1097,6 +1110,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) { } func TestEndpointsDiscoverySidecarContainer(t *testing.T) { + t.Parallel() objs := []runtime.Object{ &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index de33c64b66..e9d8be54f0 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -21,6 +21,7 @@ import ( ) func Test_EndpointSliceAdaptor_v1(t *testing.T) { + t.Parallel() endpointSlice := makeEndpointSliceV1() adaptor := newEndpointSliceAdaptorFromV1(endpointSlice) diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index cc92c7ddaa..21d49defd4 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -114,6 +114,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice { } func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ @@ -195,6 +196,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { } func TestEndpointSliceDiscoveryAdd(t *testing.T) { + t.Parallel() obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -322,6 +324,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) { } func TestEndpointSliceDiscoveryDelete(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ @@ -340,6 +343,7 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { } func TestEndpointSliceDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: 
[]string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ @@ -396,6 +400,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { } func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ @@ -424,6 +429,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { } func TestEndpointSliceDiscoveryWithService(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ @@ -516,6 +522,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { } func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1()) k8sDiscoveryTest{ @@ -623,6 +630,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { } func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { + t.Parallel() metadataConfig := AttachMetadataConfig{Node: true} nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} @@ -722,6 +730,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { } func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { + t.Parallel() metadataConfig := AttachMetadataConfig{Node: true} nodeLabels1 := map[string]string{"az": "us-east1"} nodeLabels2 := map[string]string{"az": "us-west2"} @@ -827,6 +836,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { } func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { + t.Parallel() epOne := makeEndpointSliceV1() epOne.Namespace = "ns1" objs := []runtime.Object{ @@ -1003,6 +1013,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { } func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() epOne := 
makeEndpointSliceV1() epOne.Namespace = "own-ns" @@ -1123,6 +1134,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { } func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) { + t.Parallel() ep := makeEndpointSliceV1() ep.Namespace = "ns" @@ -1169,6 +1181,7 @@ func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) { // sets up indexing for the main Kube informer only when needed. // See: https://github.com/prometheus/prometheus/pull/13554#discussion_r1490965817 func TestEndpointSliceInfIndexersCount(t *testing.T) { + t.Parallel() tests := []struct { name string withNodeMetadata bool @@ -1179,6 +1192,7 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { + t.Parallel() var ( n *Discovery mainInfIndexersCount int @@ -1204,6 +1218,7 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) { } func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) { + t.Parallel() objs := []runtime.Object{ &v1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go index 9bddfb1e14..a828dee27f 100644 --- a/discovery/kubernetes/ingress_test.go +++ b/discovery/kubernetes/ingress_test.go @@ -144,6 +144,7 @@ func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group } func TestIngressDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ @@ -158,6 +159,7 @@ func TestIngressDiscoveryAdd(t *testing.T) { } func TestIngressDiscoveryAddTLS(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ @@ -172,6 +174,7 @@ func TestIngressDiscoveryAddTLS(t *testing.T) { } func TestIngressDiscoveryAddMixed(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}) k8sDiscoveryTest{ @@ -186,6 
+189,7 @@ func TestIngressDiscoveryAddMixed(t *testing.T) { } func TestIngressDiscoveryNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := expectedTargetGroups("ns1", TLSNo) @@ -207,6 +211,7 @@ func TestIngressDiscoveryNamespaces(t *testing.T) { } func TestIngressDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true}) expected := expectedTargetGroups("own-ns", TLSNo) diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index a14f2b3d1b..e34bed899c 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -273,6 +273,7 @@ func (s *Service) hasSynced() bool { } func TestRetryOnError(t *testing.T) { + t.Parallel() for _, successAt := range []int{1, 2, 3} { var called int f := func() error { @@ -288,6 +289,7 @@ func TestRetryOnError(t *testing.T) { } func TestFailuresCountMetric(t *testing.T) { + t.Parallel() tests := []struct { role Role minFailedWatches int @@ -324,6 +326,7 @@ func TestFailuresCountMetric(t *testing.T) { } func TestNodeName(t *testing.T) { + t.Parallel() node := &apiv1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go index bbf7a6b27c..bc17efdc01 100644 --- a/discovery/kubernetes/node_test.go +++ b/discovery/kubernetes/node_test.go @@ -56,6 +56,7 @@ func makeEnumeratedNode(i int) *v1.Node { } func TestNodeDiscoveryBeforeStart(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -95,6 +96,7 @@ func TestNodeDiscoveryBeforeStart(t *testing.T) { } func TestNodeDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -124,6 +126,7 @@ func TestNodeDiscoveryAdd(t *testing.T) { } func TestNodeDiscoveryDelete(t 
*testing.T) { + t.Parallel() obj := makeEnumeratedNode(0) n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}, obj) @@ -142,6 +145,7 @@ func TestNodeDiscoveryDelete(t *testing.T) { } func TestNodeDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}) k8sDiscoveryTest{ diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go index 286a1a230d..7a3079a265 100644 --- a/discovery/kubernetes/pod_test.go +++ b/discovery/kubernetes/pod_test.go @@ -239,6 +239,7 @@ func expectedPodTargetGroupsWithNodeMeta(ns, nodeName string, nodeLabels map[str } func TestPodDiscoveryBeforeRun(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -302,6 +303,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) { } func TestPodDiscoveryInitContainer(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) ns := "default" @@ -329,6 +331,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) { } func TestPodDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -343,6 +346,7 @@ func TestPodDiscoveryAdd(t *testing.T) { } func TestPodDiscoveryDelete(t *testing.T) { + t.Parallel() obj := makePods() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}, obj) @@ -362,6 +366,7 @@ func TestPodDiscoveryDelete(t *testing.T) { } func TestPodDiscoveryUpdate(t *testing.T) { + t.Parallel() obj := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "testpod", @@ -403,6 +408,7 @@ func TestPodDiscoveryUpdate(t *testing.T) { } func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{}) initialPod := makePods() @@ -427,6 +433,7 @@ func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) { } func TestPodDiscoveryNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := 
expectedPodTargetGroups("ns1") @@ -448,6 +455,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) { } func TestPodDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RolePod, NamespaceDiscovery{IncludeOwnNamespace: true}) expected := expectedPodTargetGroups("own-ns") @@ -466,6 +474,7 @@ func TestPodDiscoveryOwnNamespace(t *testing.T) { } func TestPodDiscoveryWithNodeMetadata(t *testing.T) { + t.Parallel() attachMetadata := AttachMetadataConfig{Node: true} n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata) nodeLbls := map[string]string{"l1": "v1"} @@ -485,6 +494,7 @@ func TestPodDiscoveryWithNodeMetadata(t *testing.T) { } func TestPodDiscoveryWithNodeMetadataUpdateNode(t *testing.T) { + t.Parallel() nodeLbls := map[string]string{"l2": "v2"} attachMetadata := AttachMetadataConfig{Node: true} n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata) diff --git a/discovery/kubernetes/service_test.go b/discovery/kubernetes/service_test.go index dde3aaea57..8386ef296a 100644 --- a/discovery/kubernetes/service_test.go +++ b/discovery/kubernetes/service_test.go @@ -118,6 +118,7 @@ func makeLoadBalancerService() *v1.Service { } func TestServiceDiscoveryAdd(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}) k8sDiscoveryTest{ @@ -189,6 +190,7 @@ func TestServiceDiscoveryAdd(t *testing.T) { } func TestServiceDiscoveryDelete(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService()) k8sDiscoveryTest{ @@ -207,6 +209,7 @@ func TestServiceDiscoveryDelete(t *testing.T) { } func TestServiceDiscoveryUpdate(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService()) k8sDiscoveryTest{ @@ -251,6 +254,7 @@ func TestServiceDiscoveryUpdate(t *testing.T) { } func TestServiceDiscoveryNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{Names: 
[]string{"ns1", "ns2"}}) k8sDiscoveryTest{ @@ -303,6 +307,7 @@ func TestServiceDiscoveryNamespaces(t *testing.T) { } func TestServiceDiscoveryOwnNamespace(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{IncludeOwnNamespace: true}) k8sDiscoveryTest{ @@ -338,6 +343,7 @@ func TestServiceDiscoveryOwnNamespace(t *testing.T) { } func TestServiceDiscoveryAllNamespaces(t *testing.T) { + t.Parallel() n, c := makeDiscovery(RoleService, NamespaceDiscovery{}) k8sDiscoveryTest{ From 0ef0b75a4f9bace031dc84de9c0181ce9ba0f543 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 15 Nov 2024 11:18:46 +0000 Subject: [PATCH 0933/1397] [TESTS] Remote-Write: Fix BenchmarkStartup It was crashing due to uninitialized metrics, and not terminating due to incorrectly reading segment names. We need to export `SetMetrics` to avoid the first problem. Signed-off-by: Bryan Boreham --- storage/remote/queue_manager_test.go | 10 +++++++--- tsdb/wlog/watcher.go | 4 ++-- tsdb/wlog/watcher_test.go | 8 ++++---- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 21fdf92e36..8369da3f12 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -20,6 +20,7 @@ import ( "math" "math/rand" "os" + "path" "runtime/pprof" "sort" "strconv" @@ -48,6 +49,7 @@ import ( "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/wlog" "github.com/prometheus/prometheus/util/runutil" "github.com/prometheus/prometheus/util/testutil" ) @@ -1407,12 +1409,12 @@ func BenchmarkStartup(b *testing.B) { // Find the second largest segment; we will replay up to this. // (Second largest as WALWatcher will start tailing the largest). 
- dirents, err := os.ReadDir(dir) + dirents, err := os.ReadDir(path.Join(dir, "wal")) require.NoError(b, err) var segments []int for _, dirent := range dirents { - if i, err := strconv.Atoi(dirent.Name()); err != nil { + if i, err := strconv.Atoi(dirent.Name()); err == nil { segments = append(segments, i) } } @@ -1424,13 +1426,15 @@ func BenchmarkStartup(b *testing.B) { mcfg := config.DefaultMetadataConfig for n := 0; n < b.N; n++ { metrics := newQueueManagerMetrics(nil, "", "") + watcherMetrics := wlog.NewWatcherMetrics(nil) c := NewTestBlockedWriteClient() // todo: test with new proto type(s) - m := NewQueueManager(metrics, nil, nil, logger, dir, + m := NewQueueManager(metrics, watcherMetrics, nil, logger, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, config.RemoteWriteProtoMsgV1) m.watcher.SetStartTime(timestamp.Time(math.MaxInt64)) m.watcher.MaxSegment = segments[len(segments)-2] + m.watcher.SetMetrics() err := m.watcher.Run() require.NoError(b, err) } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index d68ef2accb..89db5d2dd7 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -206,7 +206,7 @@ func (w *Watcher) Notify() { } } -func (w *Watcher) setMetrics() { +func (w *Watcher) SetMetrics() { // Setup the WAL Watchers metrics. We do this here rather than in the // constructor because of the ordering of creating Queue Managers's, // stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig. @@ -221,7 +221,7 @@ func (w *Watcher) setMetrics() { // Start the Watcher. 
func (w *Watcher) Start() { - w.setMetrics() + w.SetMetrics() w.logger.Info("Starting WAL watcher", "queue", w.name) go w.loop() diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 68c2c5afda..398b0f4414 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -234,7 +234,7 @@ func TestTailSamples(t *testing.T) { watcher.SetStartTime(now) // Set the Watcher's metrics so they're not nil pointers. - watcher.setMetrics() + watcher.SetMetrics() for i := first; i <= last; i++ { segment, err := OpenReadSegment(SegmentName(watcher.walDir, i)) require.NoError(t, err) @@ -548,7 +548,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { watcher.MaxSegment = -1 // Set the Watcher's metrics so they're not nil pointers. - watcher.setMetrics() + watcher.SetMetrics() lastCheckpoint, _, err := LastCheckpoint(watcher.walDir) require.NoError(t, err) @@ -699,7 +699,7 @@ func TestRun_StartupTime(t *testing.T) { watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false) watcher.MaxSegment = segments - watcher.setMetrics() + watcher.SetMetrics() startTime := time.Now() err = watcher.Run() @@ -768,7 +768,7 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) { // Set up the watcher and run it in the background. 
wt := newWriteToMock(time.Millisecond) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false) - watcher.setMetrics() + watcher.SetMetrics() watcher.MaxSegment = segmentsToRead var g errgroup.Group From e13c28bd4afed29cdafbb9e9fea66d5d009275f0 Mon Sep 17 00:00:00 2001 From: zenador Date: Fri, 15 Nov 2024 22:09:50 +0800 Subject: [PATCH 0934/1397] Use a function to determine if an aggregation function is experimental (#15331) Signed-off-by: Jeanette Tan --- promql/parser/lex.go | 6 ++++++ promql/parser/parse.go | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 82bf0367b8..7210d51b7b 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -68,6 +68,12 @@ func (i ItemType) IsAggregatorWithParam() bool { return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE || i == LIMITK || i == LIMIT_RATIO } +// IsExperimentalAggregator defines the experimental aggregation functions that are controlled +// with EnableExperimentalFunctions. +func (i ItemType) IsExperimentalAggregator() bool { + return i == LIMITK || i == LIMIT_RATIO +} + // IsKeyword returns true if the Item corresponds to a keyword. // Returns false otherwise. 
func (i ItemType) IsKeyword() bool { return i > keywordsStart && i < keywordsEnd } diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 05549eaac8..3113d18d57 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -447,8 +447,8 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE desiredArgs := 1 if ret.Op.IsAggregatorWithParam() { - if !EnableExperimentalFunctions && (ret.Op == LIMITK || ret.Op == LIMIT_RATIO) { - p.addParseErrf(ret.PositionRange(), "limitk() and limit_ratio() are experimental and must be enabled with --enable-feature=promql-experimental-functions") + if !EnableExperimentalFunctions && ret.Op.IsExperimentalAggregator() { + p.addParseErrf(ret.PositionRange(), "%s() is experimental and must be enabled with --enable-feature=promql-experimental-functions", ret.Op) return } desiredArgs = 2 From 101b1c307f2b80b142c38f7cebcc3db50084ed8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90urica=20Yuri=20Nikoli=C4=87?= Date: Fri, 15 Nov 2024 16:41:49 +0100 Subject: [PATCH 0935/1397] [ENHANCEMEN] Remote-Write: optionally use a DNS resolver that picks a random IP (#15329) When a remote-write is executed towards a host name that is resolved to multiple IP addresses, this PR introduces a possibility to force creation of new connections used for the remote-write request to a randomly chosen IP address from the ones corresponding to the host name. The default behavior remains unchanged, i.s., the IP address used for the connection creation remains the one chosen by Go. This is an experimental feature, it is disabled by default. 
Signed-off-by: Yuri Nikolic --- config/config.go | 1 + docs/configuration/configuration.md | 6 ++ storage/remote/client.go | 7 +- storage/remote/dial_context.go | 62 +++++++++++ storage/remote/dial_context_test.go | 161 ++++++++++++++++++++++++++++ storage/remote/write.go | 1 + 6 files changed, 237 insertions(+), 1 deletion(-) create mode 100644 storage/remote/dial_context.go create mode 100644 storage/remote/dial_context_test.go diff --git a/config/config.go b/config/config.go index b23fff5756..a94342d841 100644 --- a/config/config.go +++ b/config/config.go @@ -1195,6 +1195,7 @@ type RemoteWriteConfig struct { Name string `yaml:"name,omitempty"` SendExemplars bool `yaml:"send_exemplars,omitempty"` SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"` + RoundRobinDNS bool `yaml:"round_robin_dns,omitempty"` // ProtobufMessage specifies the protobuf message to use against the remote // receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/ ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"` diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 2093ed8836..2a64d8c387 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2797,6 +2797,12 @@ write_relabel_configs: # For the `io.prometheus.write.v2.Request` message, this option is noop (always true). [ send_native_histograms: | default = false ] +# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it. +# When disabled, remote-write relies on Go's standard behavior, which is to try to connect to each address in turn. +# The connection timeout applies to the whole operation, i.e. in the latter case it is spread over all attempt. +# This is an experimental feature, and its behavior might still change, or even get removed. 
+[ round_robin_dns: | default = false ] + # Optionally configures AWS's Signature Verification 4 signing process to # sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread. # To use the default credentials from the AWS SDK, use `sigv4: {}`. diff --git a/storage/remote/client.go b/storage/remote/client.go index 98eb8d3d77..ad766be9bf 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -145,6 +145,7 @@ type ClientConfig struct { RetryOnRateLimit bool WriteProtoMsg config.RemoteWriteProtoMsg ChunkedReadLimit uint64 + RoundRobinDNS bool } // ReadClient will request the STREAMED_XOR_CHUNKS method of remote read but can @@ -180,7 +181,11 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) { // NewWriteClient creates a new client for remote write. func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { - httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client") + var httpOpts []config_util.HTTPClientOption + if conf.RoundRobinDNS { + httpOpts = []config_util.HTTPClientOption{config_util.WithDialContextFunc(newDialContextWithRoundRobinDNS().dialContextFn())} + } + httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client", httpOpts...) if err != nil { return nil, err } diff --git a/storage/remote/dial_context.go b/storage/remote/dial_context.go new file mode 100644 index 0000000000..b842728e4c --- /dev/null +++ b/storage/remote/dial_context.go @@ -0,0 +1,62 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "math/rand" + "net" + "net/http" + "time" + + "github.com/prometheus/common/config" +) + +type hostResolver interface { + LookupHost(context.Context, string) ([]string, error) +} + +type dialContextWithRoundRobinDNS struct { + dialContext config.DialContextFunc + resolver hostResolver + rand *rand.Rand +} + +// newDialContextWithRoundRobinDNS creates a new dialContextWithRoundRobinDNS. +// We discourage creating new instances of struct dialContextWithRoundRobinDNS by explicitly setting its members, +// except for testing purposes, and recommend using newDialContextWithRoundRobinDNS. 
+func newDialContextWithRoundRobinDNS() *dialContextWithRoundRobinDNS { + return &dialContextWithRoundRobinDNS{ + dialContext: http.DefaultTransport.(*http.Transport).DialContext, + resolver: net.DefaultResolver, + rand: rand.New(rand.NewSource(time.Now().Unix())), + } +} + +func (dc *dialContextWithRoundRobinDNS) dialContextFn() config.DialContextFunc { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return dc.dialContext(ctx, network, addr) + } + + addrs, err := dc.resolver.LookupHost(ctx, host) + if err != nil || len(addrs) == 0 { + return dc.dialContext(ctx, network, addr) + } + + randomAddr := net.JoinHostPort(addrs[dc.rand.Intn(len(addrs))], port) + return dc.dialContext(ctx, network, randomAddr) + } +} diff --git a/storage/remote/dial_context_test.go b/storage/remote/dial_context_test.go new file mode 100644 index 0000000000..d2716df53f --- /dev/null +++ b/storage/remote/dial_context_test.go @@ -0,0 +1,161 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "context" + "errors" + "math/rand" + "net" + "sync" + "testing" + "time" + + "github.com/prometheus/common/config" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +const ( + testNetwork = "tcp" + testAddrWithoutPort = "this-is-my-addr.without-port" + testAddrWithPort = "this-is-my-addr.without-port:123" + testPort = "123" + ip1 = "1.2.3.4" + ip2 = "5.6.7.8" + ip3 = "9.0.1.2" + randSeed int64 = 123456789 +) + +var ( + errMockLookupHost = errors.New("this is a mocked error") + testLookupResult = []string{ip1, ip2, ip3} + testLookupResultWithPort = []string{net.JoinHostPort(ip1, testPort), net.JoinHostPort(ip2, testPort), net.JoinHostPort(ip3, testPort)} +) + +type mockDialContext struct { + mock.Mock + + addrFrequencyMu sync.Mutex + addrFrequency map[string]int +} + +func newMockDialContext(acceptableAddresses []string) *mockDialContext { + m := &mockDialContext{ + addrFrequencyMu: sync.Mutex{}, + addrFrequency: make(map[string]int), + } + for _, acceptableAddr := range acceptableAddresses { + m.On("dialContext", mock.Anything, mock.Anything, acceptableAddr).Return(nil, nil) + } + return m +} + +func (dc *mockDialContext) dialContext(ctx context.Context, network, addr string) (net.Conn, error) { + dc.addrFrequencyMu.Lock() + defer dc.addrFrequencyMu.Unlock() + args := dc.MethodCalled("dialContext", ctx, network, addr) + dc.addrFrequency[addr]++ + return nil, args.Error(1) +} + +func (dc *mockDialContext) getCount(addr string) int { + dc.addrFrequencyMu.Lock() + defer dc.addrFrequencyMu.Unlock() + return dc.addrFrequency[addr] +} + +type mockedLookupHost struct { + withErr bool + result []string +} + +func (lh *mockedLookupHost) LookupHost(context.Context, string) ([]string, error) { + if lh.withErr { + return nil, errMockLookupHost + } + return lh.result, nil +} + +func createDialContextWithRoundRobinDNS(dialContext config.DialContextFunc, resolver hostResolver, r *rand.Rand) 
dialContextWithRoundRobinDNS { + return dialContextWithRoundRobinDNS{ + dialContext: dialContext, + resolver: resolver, + rand: r, + } +} + +func TestDialContextWithRandomConnections(t *testing.T) { + numberOfRuns := 2 * len(testLookupResult) + var mdc *mockDialContext + testCases := map[string]struct { + addr string + setup func() dialContextWithRoundRobinDNS + check func() + }{ + "if address contains no port call default DealContext": { + addr: testAddrWithoutPort, + setup: func() dialContextWithRoundRobinDNS { + mdc = newMockDialContext([]string{testAddrWithoutPort}) + return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{withErr: false}, rand.New(rand.NewSource(time.Now().Unix()))) + }, + check: func() { + require.Equal(t, numberOfRuns, mdc.getCount(testAddrWithoutPort)) + }, + }, + "if lookup host returns error call default DealContext": { + addr: testAddrWithPort, + setup: func() dialContextWithRoundRobinDNS { + mdc = newMockDialContext([]string{testAddrWithPort}) + return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{withErr: true}, rand.New(rand.NewSource(time.Now().Unix()))) + }, + check: func() { + require.Equal(t, numberOfRuns, mdc.getCount(testAddrWithPort)) + }, + }, + "if lookup returns no addresses call default DealContext": { + addr: testAddrWithPort, + setup: func() dialContextWithRoundRobinDNS { + mdc = newMockDialContext([]string{testAddrWithPort}) + return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{}, rand.New(rand.NewSource(time.Now().Unix()))) + }, + check: func() { + require.Equal(t, numberOfRuns, mdc.getCount(testAddrWithPort)) + }, + }, + "if lookup host is successful, shuffle results": { + addr: testAddrWithPort, + setup: func() dialContextWithRoundRobinDNS { + mdc = newMockDialContext(testLookupResultWithPort) + return createDialContextWithRoundRobinDNS(mdc.dialContext, &mockedLookupHost{result: testLookupResult}, rand.New(rand.NewSource(randSeed))) + }, + check: 
func() { + // we ensure that not all runs will choose the first element of the lookup + require.NotEqual(t, numberOfRuns, mdc.getCount(testLookupResultWithPort[0])) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + dc := tc.setup() + require.NotNil(t, dc) + for i := 0; i < numberOfRuns; i++ { + _, err := dc.dialContextFn()(context.Background(), testNetwork, tc.addr) + require.NoError(t, err) + } + tc.check() + }) + } +} diff --git a/storage/remote/write.go b/storage/remote/write.go index 639f344520..0363095444 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -180,6 +180,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { GoogleIAMConfig: rwConf.GoogleIAMConfig, Headers: rwConf.Headers, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, + RoundRobinDNS: rwConf.RoundRobinDNS, }) if err != nil { return err From 16a4354b48d4ae3e0160a9d48fa883bab971402c Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Fri, 15 Nov 2024 15:48:34 -0500 Subject: [PATCH 0936/1397] chore(deduper): add compile check that slog.Handler int is satisfied Signed-off-by: TJ Hoplock --- util/logging/dedupe.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go index d5aee5c095..e7dff20f78 100644 --- a/util/logging/dedupe.go +++ b/util/logging/dedupe.go @@ -26,6 +26,8 @@ const ( maxEntries = 1024 ) +var _ slog.Handler = (*Deduper)(nil) + // Deduper implements *slog.Handler, dedupes log lines based on a time duration. 
type Deduper struct { next *slog.Logger From 4f48e76086064833341a87a55c3b6cb861aaa4af Mon Sep 17 00:00:00 2001 From: huochexizhan Date: Tue, 19 Nov 2024 12:02:10 +0800 Subject: [PATCH 0937/1397] chore: fix some function names in comment Signed-off-by: huochexizhan --- model/textparse/openmetricsparse.go | 2 +- promql/engine.go | 2 +- web/web.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 3ae9c7ddfc..16e805f3a9 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -388,7 +388,7 @@ func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName p.skipCTSeries = skipCTSeries // Do we need to set it? } -// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +// resetCTParseValues resets the parser to the state before CreatedTimestamp method was called. func (p *OpenMetricsParser) resetCTParseValues() { p.ctHashSet = 0 p.skipCTSeries = true diff --git a/promql/engine.go b/promql/engine.go index 5e0539d8f7..fdc0c39665 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3443,7 +3443,7 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat return nil } -// groupingKey builds and returns the grouping key for the given metric and +// generateGroupingKey builds and returns the grouping key for the given metric and // grouping labels. func generateGroupingKey(metric labels.Labels, grouping []string, without bool, buf []byte) (uint64, []byte) { if without { diff --git a/web/web.go b/web/web.go index 21c41c55eb..08c683bae8 100644 --- a/web/web.go +++ b/web/web.go @@ -110,7 +110,7 @@ const ( Stopping ) -// withStackTrace logs the stack trace in case the request panics. The function +// withStackTracer logs the stack trace in case the request panics. The function // will re-raise the error which will then be handled by the net/http package. 
// It is needed because the go-kit log package doesn't manage properly the // panics from net/http (see https://github.com/go-kit/kit/issues/233). From 45db23617a13eb80dd56f349d66e437b07bc52d0 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 19 Nov 2024 18:17:49 +1100 Subject: [PATCH 0938/1397] promql: fix incorrect "native histogram ignored in aggregation" annotations (#15414) Signed-off-by: Charles Korn --- promql/engine.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 5e0539d8f7..e66ea67853 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3059,7 +3059,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix case parser.MAX: if h != nil { - annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) continue } if group.floatValue < f || math.IsNaN(group.floatValue) { @@ -3068,7 +3068,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix case parser.MIN: if h != nil { - annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) continue } if group.floatValue > f || math.IsNaN(group.floatValue) { @@ -3084,6 +3084,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix delta := f - group.floatMean group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) + } else { if op == parser.STDVAR { annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) } else { From 62e6e55c076240a436e464b474f7f203c079094b Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 19 Nov 2024 19:13:34 +1100 Subject: [PATCH 0939/1397] promql: fix issues with comparison binary operations with `bool` modifier 
and native histograms (#15413) * Fix issue where comparison operations with `bool` modifier and native histograms return histograms rather than 0 or 1 * Don't emit anything for comparisons between floats and histograms when `bool` modifier is used * Don't emit anything for comparisons between floats and histograms when `bool` modifier is used between a vector and a scalar --------- Signed-off-by: Charles Korn --- promql/engine.go | 3 + promql/promqltest/testdata/operators.test | 299 ++++++++++++++++++++++ 2 files changed, 302 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index e66ea67853..8554149ba3 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2607,9 +2607,11 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos) if err != nil { lastErr = err + continue } switch { case returnBool: + histogramValue = nil if keep { floatValue = 1.0 } else { @@ -2728,6 +2730,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos) if err != nil { lastErr = err + continue } // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test index 9070953abd..4b00831dfc 100644 --- a/promql/promqltest/testdata/operators.test +++ b/promql/promqltest/testdata/operators.test @@ -508,3 +508,302 @@ eval instant at 1m 10 atan2 20 eval instant at 1m 10 atan2 NaN NaN + +clear + +# Test comparison operations with floats and histograms. 
+load 6m + left_floats 1 2 _ _ 3 stale 4 5 NaN Inf -Inf + right_floats 4 _ _ 5 3 7 -1 20 NaN Inf -Inf + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} + right_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ + right_floats_for_histograms 0 -1 2 3 4 + +eval range from 0 to 60m step 6m left_floats == right_floats + left_floats _ _ _ _ 3 _ _ _ _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats == bool right_floats + {} 0 _ _ _ 1 _ 0 0 0 1 1 + +eval range from 0 to 60m step 6m left_floats == does_not_match + # No results. + +eval range from 0 to 24m step 6m left_histograms == right_histograms + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _ + +eval range from 0 to 24m step 6m left_histograms == bool right_histograms + {} 1 0 _ _ _ + +eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats != right_floats + left_floats 1 _ _ _ _ _ 4 5 NaN _ _ + +eval range from 0 to 60m step 6m left_floats != bool right_floats + {} 1 _ _ _ 0 _ 1 1 1 0 0 + +eval range from 0 to 24m step 6m left_histograms != right_histograms + left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _ + +eval range from 0 to 24m step 6m left_histograms != bool right_histograms + {} 0 1 _ _ _ + +eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms + # No results. 
+ +eval range from 0 to 60m step 6m left_floats > right_floats + left_floats _ _ _ _ _ _ 4 _ _ _ _ + +eval range from 0 to 60m step 6m left_floats > bool right_floats + {} 0 _ _ _ 0 _ 1 0 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms > right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats >= right_floats + left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats >= bool right_floats + {} 0 _ _ _ 1 _ 1 0 0 1 1 + +eval_info range from 0 to 24m step 6m left_histograms >= right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats < right_floats + left_floats 1 _ _ _ _ _ _ 5 _ _ _ + +eval range from 0 to 60m step 6m left_floats < bool right_floats + {} 1 _ _ _ 0 _ 0 1 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms < right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms + # No results. 
+ +eval range from 0 to 60m step 6m left_floats <= right_floats + left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats <= bool right_floats + {} 1 _ _ _ 1 _ 0 1 0 1 1 + +eval_info range from 0 to 24m step 6m left_histograms <= right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms + # No results. + +# Vector / scalar combinations with scalar on right side +eval range from 0 to 60m step 6m left_floats == 3 + left_floats _ _ _ _ 3 _ _ _ _ _ _ + +eval range from 0 to 60m step 6m left_floats != 3 + left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf + +eval range from 0 to 60m step 6m left_floats > 3 + left_floats _ _ _ _ _ _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m left_floats >= 3 + left_floats _ _ _ _ 3 _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m left_floats < 3 + left_floats 1 2 _ _ _ _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m left_floats <= 3 + left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m left_floats == bool 3 + {} 0 0 _ _ 1 _ 0 0 0 0 0 + +eval range from 0 to 60m step 6m left_floats == Inf + left_floats _ _ _ _ _ _ _ _ _ Inf _ + +eval range from 0 to 60m step 6m left_floats == bool Inf + {} 0 0 _ _ 0 _ 0 0 0 1 0 + +eval range from 0 to 60m step 6m left_floats == NaN + # No results. + +eval range from 0 to 60m step 6m left_floats == bool NaN + {} 0 0 _ _ 0 _ 0 0 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms == 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != 3 + # No results. + +eval range from 0 to 24m step 6m left_histograms != 0 + # No results. 
+ +eval_info range from 0 to 24m step 6m left_histograms > 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > 0 + # No results. + +eval range from 0 to 24m step 6m left_histograms >= 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= 3 + # No results. + +eval range from 0 to 24m step 6m left_histograms <= 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool 0 + # No results. 
+ +# Vector / scalar combinations with scalar on left side +eval range from 0 to 60m step 6m 3 == left_floats + left_floats _ _ _ _ 3 _ _ _ _ _ _ + +eval range from 0 to 60m step 6m 3 != left_floats + left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf + +eval range from 0 to 60m step 6m 3 < left_floats + left_floats _ _ _ _ _ _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m 3 <= left_floats + left_floats _ _ _ _ 3 _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m 3 > left_floats + left_floats 1 2 _ _ _ _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m 3 >= left_floats + left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m 3 == bool left_floats + {} 0 0 _ _ 1 _ 0 0 0 0 0 + +eval range from 0 to 60m step 6m Inf == left_floats + left_floats _ _ _ _ _ _ _ _ _ Inf _ + +eval range from 0 to 60m step 6m Inf == bool left_floats + {} 0 0 _ _ 0 _ 0 0 0 1 0 + +eval range from 0 to 60m step 6m NaN == left_floats + # No results. + +eval range from 0 to 60m step 6m NaN == bool left_floats + {} 0 0 _ _ 0 _ 0 0 0 0 0 + +eval range from 0 to 24m step 6m 3 == left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 == left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 != left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 != left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 > left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 > left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 >= left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 >= left_histograms + # No results. 
+ +clear From 06d54fcc6c104047d871466639e1ec889fb79564 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 19 Nov 2024 16:49:01 +0100 Subject: [PATCH 0940/1397] [PERF] TSDB: Optimize inverse matching (#14144) Simple follow-up to #13620. Modify `tsdb.PostingsForMatchers` to use the optimized tsdb.IndexReader.PostingsForLabelMatching method also for inverse matching. Introduce method `PostingsForAllLabelValues`, to avoid changing the existing method. The performance is much improved for a subset of the cases; there are up to ~60% CPU gains and ~12.5% reduction in memory usage. Remove `TestReader_InversePostingsForMatcherHonorsContextCancel` since `inversePostingsForMatcher` only passes `ctx` to `IndexReader` implementations now. Signed-off-by: Arve Knudsen --- tsdb/block.go | 8 +++++++ tsdb/head_read.go | 4 ++++ tsdb/index/index.go | 23 +++++++++++++++---- tsdb/index/index_test.go | 46 +++++++++++++++++++++++++++++++++++++ tsdb/index/postings.go | 16 +++++++++++++ tsdb/index/postings_test.go | 15 ++++++++++++ tsdb/ooo_head_read.go | 4 ++++ tsdb/querier.go | 27 ++++++---------------- tsdb/querier_test.go | 29 ++++++++++++++--------- 9 files changed, 137 insertions(+), 35 deletions(-) diff --git a/tsdb/block.go b/tsdb/block.go index aec99788c9..6483f8d8bb 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -82,6 +82,10 @@ type IndexReader interface { // If no postings are found having at least one matching label, an empty iterator is returned. PostingsForLabelMatching(ctx context.Context, name string, match func(value string) bool) index.Postings + // PostingsForAllLabelValues returns a sorted iterator over all postings having a label with the given name. + // If no postings are found with the label in question, an empty iterator is returned. + PostingsForAllLabelValues(ctx context.Context, name string) index.Postings + // SortedPostings returns a postings list that is reordered to be sorted // by the label set of the underlying series. 
SortedPostings(index.Postings) index.Postings @@ -531,6 +535,10 @@ func (r blockIndexReader) PostingsForLabelMatching(ctx context.Context, name str return r.ir.PostingsForLabelMatching(ctx, name, match) } +func (r blockIndexReader) PostingsForAllLabelValues(ctx context.Context, name string) index.Postings { + return r.ir.PostingsForAllLabelValues(ctx, name) +} + func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { return r.ir.SortedPostings(p) } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 29adc3ee74..79ed0f0240 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -123,6 +123,10 @@ func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name str return h.head.postings.PostingsForLabelMatching(ctx, name, match) } +func (h *headIndexReader) PostingsForAllLabelValues(ctx context.Context, name string) index.Postings { + return h.head.postings.PostingsForAllLabelValues(ctx, name) +} + func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { series := make([]*memSeries, 0, 128) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 4e199c5bd2..2708b07b14 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -1777,6 +1777,15 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P } func (r *Reader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { + return r.postingsForLabelMatching(ctx, name, match) +} + +func (r *Reader) PostingsForAllLabelValues(ctx context.Context, name string) Postings { + return r.postingsForLabelMatching(ctx, name, nil) +} + +// postingsForLabelMatching implements PostingsForLabelMatching if match is non-nil, and PostingsForAllLabelValues otherwise. 
+func (r *Reader) postingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { if r.version == FormatV1 { return r.postingsForLabelMatchingV1(ctx, name, match) } @@ -1786,11 +1795,17 @@ func (r *Reader) PostingsForLabelMatching(ctx context.Context, name string, matc return EmptyPostings() } + postingsEstimate := 0 + if match == nil { + // The caller wants all postings for name. + postingsEstimate = len(e) * symbolFactor + } + lastVal := e[len(e)-1].value - var its []Postings + its := make([]Postings, 0, postingsEstimate) if err := r.traversePostingOffsets(ctx, e[0].off, func(val string, postingsOff uint64) (bool, error) { - if match(val) { - // We want this postings iterator since the value is a match + if match == nil || match(val) { + // We want this postings iterator since the value is a match. postingsDec := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) _, p, err := r.dec.DecodePostings(postingsDec) if err != nil { @@ -1819,7 +1834,7 @@ func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, ma return ErrPostings(ctx.Err()) } count++ - if !match(val) { + if match != nil && !match(val) { continue } diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 0d330cfcb9..0a45f3722a 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -613,6 +613,52 @@ func TestChunksTimeOrdering(t *testing.T) { require.NoError(t, idx.Close()) } +func TestReader_PostingsForLabelMatching(t *testing.T) { + const seriesCount = 9 + var input indexWriterSeriesSlice + for i := 1; i <= seriesCount; i++ { + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings("__name__", strconv.Itoa(i)), + chunks: []chunks.Meta{ + {Ref: 1, MinTime: 0, MaxTime: 10}, + }, + }) + } + ir, _, _ := createFileReader(context.Background(), t, input) + + p := ir.PostingsForLabelMatching(context.Background(), "__name__", func(v string) bool { + iv, err := strconv.Atoi(v) + if err != nil { + 
panic(err) + } + return iv%2 == 0 + }) + require.NoError(t, p.Err()) + refs, err := ExpandPostings(p) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{4, 6, 8, 10}, refs) +} + +func TestReader_PostingsForAllLabelValues(t *testing.T) { + const seriesCount = 9 + var input indexWriterSeriesSlice + for i := 1; i <= seriesCount; i++ { + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings("__name__", strconv.Itoa(i)), + chunks: []chunks.Meta{ + {Ref: 1, MinTime: 0, MaxTime: 10}, + }, + }) + } + ir, _, _ := createFileReader(context.Background(), t, input) + + p := ir.PostingsForAllLabelValues(context.Background(), "__name__") + require.NoError(t, p.Err()) + refs, err := ExpandPostings(p) + require.NoError(t, err) + require.Equal(t, []storage.SeriesRef{3, 4, 5, 6, 7, 8, 9, 10, 11}, refs) +} + func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { const seriesCount = 1000 var input indexWriterSeriesSlice diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index d7b497e613..43ab1b55a3 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -447,6 +447,22 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, return Merge(ctx, its...) } +func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string) Postings { + p.mtx.RLock() + + e := p.m[name] + its := make([]Postings, 0, len(e)) + for _, refs := range e { + if len(refs) > 0 { + its = append(its, NewListPostings(refs)) + } + } + + // Let the mutex go before merging. + p.mtx.RUnlock() + return Merge(ctx, its...) +} + // labelValues returns a slice of label values for the given label name. // It will take the read lock. 
func (p *MemPostings) labelValues(name string) []string { diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 96c9ed124b..6ff5b9c060 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1460,6 +1460,21 @@ func TestMemPostings_PostingsForLabelMatching(t *testing.T) { require.Equal(t, []storage.SeriesRef{2, 4}, refs) } +func TestMemPostings_PostingsForAllLabelValues(t *testing.T) { + mp := NewMemPostings() + mp.Add(1, labels.FromStrings("foo", "1")) + mp.Add(2, labels.FromStrings("foo", "2")) + mp.Add(3, labels.FromStrings("foo", "3")) + mp.Add(4, labels.FromStrings("foo", "4")) + + p := mp.PostingsForAllLabelValues(context.Background(), "foo") + require.NoError(t, p.Err()) + refs, err := ExpandPostings(p) + require.NoError(t, err) + // All postings for the label should be returned. + require.Equal(t, []storage.SeriesRef{1, 2, 3, 4}, refs) +} + func TestMemPostings_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { memP := NewMemPostings() seriesCount := 10 * checkContextEveryNIterations diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 26cd4d057e..2a1a44d18e 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -450,6 +450,10 @@ func (ir *OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context return index.ErrPostings(errors.New("not supported")) } +func (ir *OOOCompactionHeadIndexReader) PostingsForAllLabelValues(context.Context, string) index.Postings { + return index.ErrPostings(errors.New("not supported")) +} + func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings { // This will already be sorted from the Postings() call above. 
return p diff --git a/tsdb/querier.go b/tsdb/querier.go index f741c5e281..be2dea2de4 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -365,29 +365,16 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma return ix.Postings(ctx, m.Name, m.Value) } - vals, err := ix.LabelValues(ctx, m.Name) - if err != nil { - return nil, err - } - - res := vals[:0] - // If the match before inversion was !="" or !~"", we just want all the values. + // If the matcher being inverted is =~"" or ="", we just want all the values. if m.Value == "" && (m.Type == labels.MatchRegexp || m.Type == labels.MatchEqual) { - res = vals - } else { - count := 1 - for _, val := range vals { - if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { - return nil, ctx.Err() - } - count++ - if !m.Matches(val) { - res = append(res, val) - } - } + it := ix.PostingsForAllLabelValues(ctx, m.Name) + return it, it.Err() } - return ix.Postings(ctx, m.Name, res...) + it := ix.PostingsForLabelMatching(ctx, m.Name, func(s string) bool { + return !m.Matches(s) + }) + return it, it.Err() } func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index e97aea2fde..8e23bee581 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2340,6 +2340,16 @@ func (m mockIndex) PostingsForLabelMatching(ctx context.Context, name string, ma return index.Merge(ctx, res...) } +func (m mockIndex) PostingsForAllLabelValues(ctx context.Context, name string) index.Postings { + var res []index.Postings + for l, srs := range m.postings { + if l.Name == name { + res = append(res, index.NewListPostings(srs)) + } + } + return index.Merge(ctx, res...) 
+} + func (m mockIndex) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { out := make([]storage.SeriesRef, 0, 128) @@ -3327,6 +3337,10 @@ func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func return index.ErrPostings(errors.New("PostingsForLabelMatching called")) } +func (m mockMatcherIndex) PostingsForAllLabelValues(context.Context, string) index.Postings { + return index.ErrPostings(errors.New("PostingsForAllLabelValues called")) +} + func TestPostingsForMatcher(t *testing.T) { ctx := context.Background() @@ -3725,17 +3739,6 @@ func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. } -func TestReader_InversePostingsForMatcherHonorsContextCancel(t *testing.T) { - ir := mockReaderOfLabels{} - - failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations) - ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} - _, err := inversePostingsForMatcher(ctx, ir, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) - - require.Error(t, err) - require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. 
-} - type mockReaderOfLabels struct{} const mockReaderOfLabelsSeriesCount = checkContextEveryNIterations * 10 @@ -3768,6 +3771,10 @@ func (m mockReaderOfLabels) PostingsForLabelMatching(context.Context, string, fu panic("PostingsForLabelMatching called") } +func (m mockReaderOfLabels) PostingsForAllLabelValues(context.Context, string) index.Postings { + panic("PostingsForAllLabelValues called") +} + func (m mockReaderOfLabels) Postings(context.Context, string, ...string) (index.Postings, error) { panic("Postings called") } From 048222867a42485dfda35849e44afff133c87934 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Wed, 20 Nov 2024 02:07:31 +0530 Subject: [PATCH 0941/1397] fix count_values for histograms Signed-off-by: Neeraj Gartia --- promql/engine.go | 6 +++++- promql/promqltest/testdata/aggregators.test | 18 ++++++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 5e0539d8f7..8f1587c62c 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3331,7 +3331,11 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping [] var buf []byte for _, s := range vec { enh.resetBuilder(s.Metric) - enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + if s.H == nil { + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + } else { + enh.lb.Set(valueLabel, s.H.String()) + } metric := enh.lb.Labels() // Considering the count_values() diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 19a896a6fb..475fb8a569 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -351,37 +351,41 @@ load 5m version{job="app-server", instance="1", group="production"} 6 version{job="app-server", instance="0", group="canary"} 7 version{job="app-server", instance="1", group="canary"} 7 + version{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:20 
z_bucket_w:0.001 z_bucket:2 buckets:[1 2] n_buckets:[1 2]}} + version{job="app-server", instance="3", group="canary"} {{schema:0 sum:10 count:20 z_bucket_w:0.001 z_bucket:2 buckets:[1 2] n_buckets:[1 2]}} eval instant at 1m count_values("version", version) {version="6"} 5 {version="7"} 2 {version="8"} 2 - + {version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 eval instant at 1m count_values(((("version"))), version) - {version="6"} 5 - {version="7"} 2 - {version="8"} 2 - + {version="6"} 5 + {version="7"} 2 + {version="8"} 2 + {version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 eval instant at 1m count_values without (instance)("version", version) {job="api-server", group="production", version="6"} 3 {job="api-server", group="canary", version="8"} 2 {job="app-server", group="production", version="6"} 2 {job="app-server", group="canary", version="7"} 2 + {job="app-server", group="canary", version="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}"} 2 # Overwrite label with output. Don't do this. eval instant at 1m count_values without (instance)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 + {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Overwrite label with output. Don't do this. eval instant at 1m count_values by (job, group)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 - + {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 # Tests for quantile. 
clear @@ -441,12 +445,14 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data{test="histogram sample",point="c"} {{schema:0 sum:0 count:0}} foo .8 eval instant at 1m group without(point)(data) {test="two samples"} 1 {test="three samples"} 1 {test="uneven samples"} 1 + {test="histogram sample"} 1 eval instant at 1m group(foo) {} 1 From 5cd9855999041f942961fc45b38d09fac73bf6c1 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Wed, 20 Nov 2024 10:29:18 +0100 Subject: [PATCH 0942/1397] tests(promql/testdata): add regression test for and-on (#15425) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * tests(promql/testdata): add regression test for and-on I'd like to use queries of the form "x and on() (vector(y)==1)" to be able to include and exclude series for dashboards. This helps migration to native histograms in dashboards by using a dashboard variable to set "y" to either -1 or 1 to exclude or include the result. Signed-off-by: György Krajcsovits --------- Signed-off-by: György Krajcsovits --- promql/promqltest/testdata/operators.test | 47 +++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test index 4b00831dfc..ad908a228a 100644 --- a/promql/promqltest/testdata/operators.test +++ b/promql/promqltest/testdata/operators.test @@ -807,3 +807,50 @@ eval range from 0 to 24m step 6m 0 >= left_histograms # No results. 
clear + +# Test completely discarding or completely including series in results with "and on" +load_with_nhcb 5m + testhistogram_bucket{le="0.1", id="1"} 0+5x10 + testhistogram_bucket{le="0.2", id="1"} 0+7x10 + testhistogram_bucket{le="+Inf", id="1"} 0+12x10 + testhistogram_bucket{le="0.1", id="2"} 0+4x10 + testhistogram_bucket{le="0.2", id="2"} 0+6x10 + testhistogram_bucket{le="+Inf", id="2"} 0+11x10 + +# Include all series when "and on" with non-empty vector. +eval instant at 10m (testhistogram_bucket) and on() (vector(1) == 1) + {__name__="testhistogram_bucket", le="0.1", id="1"} 10.0 + {__name__="testhistogram_bucket", le="0.2", id="1"} 14.0 + {__name__="testhistogram_bucket", le="+Inf", id="1"} 24.0 + {__name__="testhistogram_bucket", le="0.1", id="2"} 8.0 + {__name__="testhistogram_bucket", le="0.2", id="2"} 12.0 + {__name__="testhistogram_bucket", le="+Inf", id="2"} 22.0 + +eval range from 0 to 10m step 5m (testhistogram_bucket) and on() (vector(1) == 1) + {__name__="testhistogram_bucket", le="0.1", id="1"} 0.0 5.0 10.0 + {__name__="testhistogram_bucket", le="0.2", id="1"} 0.0 7.0 14.0 + {__name__="testhistogram_bucket", le="+Inf", id="1"} 0.0 12.0 24.0 + {__name__="testhistogram_bucket", le="0.1", id="2"} 0.0 4.0 8.0 + {__name__="testhistogram_bucket", le="0.2", id="2"} 0.0 6.0 12.0 + {__name__="testhistogram_bucket", le="+Inf", id="2"} 0.0 11.0 22.0 + +# Exclude all series when "and on" with empty vector. +eval instant at 10m (testhistogram_bucket) and on() (vector(-1) == 1) + +eval range from 0 to 10m step 5m (testhistogram_bucket) and on() (vector(-1) == 1) + +# Include all native histogram series when "and on" with non-empty vector. 
+eval instant at 10m (testhistogram) and on() (vector(1) == 1) + {__name__="testhistogram", id="1"} {{schema:-53 sum:0 count:24 buckets:[10 4 10] custom_values:[0.1 0.2]}} + {__name__="testhistogram", id="2"} {{schema:-53 sum:0 count:22 buckets:[8 4 10] custom_values:[0.1 0.2]}} + +eval range from 0 to 10m step 5m (testhistogram) and on() (vector(1) == 1) + {__name__="testhistogram", id="1"} {{schema:-53 sum:0 count:0 custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:12 buckets:[5 2 5] custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:24 buckets:[10 4 10] custom_values:[0.1 0.2]}} + {__name__="testhistogram", id="2"} {{schema:-53 sum:0 count:0 custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:11 buckets:[4 2 5] custom_values:[0.1 0.2]}} {{schema:-53 sum:0 count:22 buckets:[8 4 10] custom_values:[0.1 0.2]}} + +# Exclude all native histogram series when "and on" with empty vector. +eval instant at 10m (testhistogram) and on() (vector(-1) == 1) + +eval range from 0 to 10m step 5m (testhistogram) and on() (vector(-1) == 1) + +clear From 9c02c26418b641fa5327a1dcaf19361354a2c0ab Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 20 Nov 2024 11:13:03 +0100 Subject: [PATCH 0943/1397] OTLP receiver: Convert also metric metadata (#15416) Signed-off-by: Arve Knudsen --- CHANGELOG.md | 2 ++ .../prometheusremotewrite/metrics_to_prw.go | 18 +++++++++- .../metrics_to_prw_test.go | 36 ++++++++++++++++++- .../otlp_to_openmetrics_metadata.go | 34 ------------------ .../prometheusremotewrite/timeseries.go | 5 +++ storage/remote/write_handler.go | 1 + 6 files changed, 60 insertions(+), 36 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ead292fc7d..9f9050cc96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## unreleased +* [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416 + ## 3.0.0 / 2024-11-14 This release includes new features such as a brand new UI and UTF-8 support enabled by default. 
As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users that want to upgrade we recommend to read through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/). diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 4f8baf3100..63862c4a70 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -37,7 +37,6 @@ type Settings struct { DisableTargetInfo bool ExportCreatedMetric bool AddMetricSuffixes bool - SendMetadata bool AllowUTF8 bool PromoteResourceAttributes []string } @@ -47,6 +46,7 @@ type PrometheusConverter struct { unique map[uint64]*prompb.TimeSeries conflicts map[uint64][]*prompb.TimeSeries everyN everyNTimes + metadata []prompb.MetricMetadata } func NewPrometheusConverter() *PrometheusConverter { @@ -60,6 +60,16 @@ func NewPrometheusConverter() *PrometheusConverter { func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() + + numMetrics := 0 + for i := 0; i < resourceMetricsSlice.Len(); i++ { + scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() + for j := 0; j < scopeMetricsSlice.Len(); j++ { + numMetrics += scopeMetricsSlice.At(j).Metrics().Len() + } + } + c.metadata = make([]prompb.MetricMetadata, 0, numMetrics) + for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() @@ -86,6 +96,12 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric } promName := 
prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + c.metadata = append(c.metadata, prompb.MetricMetadata{ + Type: otelMetricTypeToPromMetricType(metric), + MetricFamilyName: promName, + Help: metric.Description(), + Unit: metric.Unit(), + }) // handle individual metrics based on type //exhaustive:enforce diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index 641437632d..64d0ebd6f5 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -22,20 +22,46 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + + "github.com/prometheus/prometheus/prompb" + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) func TestFromMetrics(t *testing.T) { t.Run("successful", func(t *testing.T) { converter := NewPrometheusConverter() payload := createExportRequest(5, 128, 128, 2, 0) + var expMetadata []prompb.MetricMetadata + resourceMetricsSlice := payload.Metrics().ResourceMetrics() + for i := 0; i < resourceMetricsSlice.Len(); i++ { + scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() + for j := 0; j < scopeMetricsSlice.Len(); j++ { + metricSlice := scopeMetricsSlice.At(j).Metrics() + for k := 0; k < metricSlice.Len(); k++ { + metric := metricSlice.At(k) + promName := prometheustranslator.BuildCompliantName(metric, "", false, false) + expMetadata = append(expMetadata, prompb.MetricMetadata{ + Type: otelMetricTypeToPromMetricType(metric), + MetricFamilyName: promName, + Help: metric.Description(), + Unit: metric.Unit(), + }) + } + } + } 
annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}) require.NoError(t, err) require.Empty(t, annots) + + if diff := cmp.Diff(expMetadata, converter.Metadata()); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } }) t.Run("context cancellation", func(t *testing.T) { @@ -115,13 +141,15 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { for _, exemplarsPerSeries := range []int{0, 5, 10} { b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) { payload := createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries) + b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { converter := NewPrometheusConverter() annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}) require.NoError(b, err) require.Empty(b, annots) require.NotNil(b, converter.TimeSeries()) + require.NotNil(b, converter.Metadata()) } }) } @@ -148,6 +176,8 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou m := metrics.AppendEmpty() m.SetEmptyHistogram() m.SetName(fmt.Sprintf("histogram-%v", i)) + m.SetDescription("histogram") + m.SetUnit("unit") m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) h := m.Histogram().DataPoints().AppendEmpty() h.SetTimestamp(ts) @@ -166,6 +196,8 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou m := metrics.AppendEmpty() m.SetEmptySum() m.SetName(fmt.Sprintf("sum-%v", i)) + m.SetDescription("sum") + m.SetUnit("unit") m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) point := m.Sum().DataPoints().AppendEmpty() point.SetTimestamp(ts) @@ -178,6 +210,8 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou m := metrics.AppendEmpty() m.SetEmptyGauge() m.SetName(fmt.Sprintf("gauge-%v", i)) + m.SetDescription("gauge") + m.SetUnit("unit") point 
:= m.Gauge().DataPoints().AppendEmpty() point.SetTimestamp(ts) point.SetDoubleValue(1.23) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index b423d2cc6e..359fc52522 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -20,7 +20,6 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/prompb" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMetadata_MetricType { @@ -42,36 +41,3 @@ func otelMetricTypeToPromMetricType(otelMetric pmetric.Metric) prompb.MetricMeta } return prompb.MetricMetadata_UNKNOWN } - -func OtelMetricsToMetadata(md pmetric.Metrics, addMetricSuffixes, allowUTF8 bool) []*prompb.MetricMetadata { - resourceMetricsSlice := md.ResourceMetrics() - - metadataLength := 0 - for i := 0; i < resourceMetricsSlice.Len(); i++ { - scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { - metadataLength += scopeMetricsSlice.At(j).Metrics().Len() - } - } - - var metadata = make([]*prompb.MetricMetadata, 0, metadataLength) - for i := 0; i < resourceMetricsSlice.Len(); i++ { - resourceMetrics := resourceMetricsSlice.At(i) - scopeMetricsSlice := resourceMetrics.ScopeMetrics() - - for j := 0; j < scopeMetricsSlice.Len(); j++ { - scopeMetrics := scopeMetricsSlice.At(j) - for k := 0; k < scopeMetrics.Metrics().Len(); k++ { - metric := scopeMetrics.Metrics().At(k) - entry := prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addMetricSuffixes, allowUTF8), - Help: metric.Description(), - } - 
metadata = append(metadata, &entry) - } - } - } - - return metadata -} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go b/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go index fe973761a2..abffbe6105 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go @@ -39,3 +39,8 @@ func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries { return allTS } + +// Metadata returns a slice of the prompb.Metadata that were converted from OTel format. +func (c *PrometheusConverter) Metadata() []prompb.MetricMetadata { + return c.metadata +} diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 87102a374b..f719ea6a1f 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -526,6 +526,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ Timeseries: converter.TimeSeries(), + Metadata: converter.Metadata(), }) switch { From a6fb16fcb4051b0c50dcbac4c54ec5fc2f8d5efb Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:11:36 +0530 Subject: [PATCH 0944/1397] PromQL: Convert more native histogram tests to promql-test framework (#15419) This converts `TestNativeHistogram_SubOperator` to the promql testing framework. It also removes `TestNativeHistogram_Sum_Count_Add_AvgOperator`, which got converted earlier. 
Signed-off-by: Neeraj Gartia --- promql/engine_test.go | 448 ------------------ .../testdata/native_histograms.test | 20 + 2 files changed, 20 insertions(+), 448 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index 6dbfde6d82..a9a8924f85 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -19,7 +19,6 @@ import ( "fmt" "math" "sort" - "strconv" "strings" "sync" "testing" @@ -39,7 +38,6 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" - "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -3150,452 +3148,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { } } -func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram - expectedAvg histogram.FloatHistogram - }{ - { - histograms: []histogram.Histogram{ - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 25, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 4, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{1, 1, -1, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{2, 2, -3, 8}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - 
}, - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 41, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers. - }, - }, - expected: histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 0, - ZeroThreshold: 0.001, - ZeroCount: 14, - Count: 107, - Sum: 4691.2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 7}, - }, - PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 6}, - {Offset: 3, Length: 3}, - }, - NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, - }, - expectedAvg: histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 0, - ZeroThreshold: 0.001, - ZeroCount: 3.5, - Count: 26.75, - Sum: 1172.8, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 7}, - }, - PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 6}, - {Offset: 3, Length: 3}, - }, - NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - seriesNameOverTime := "sparse_histogram_series_over_time" - - engine := newTestEngine(t) - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := 
storage.Appender(context.Background()) - _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) - require.NoError(t, err) - for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - - lbls = labels.FromStrings("__name__", seriesNameOverTime) - newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) - // Since we mutate h later, we need to create a copy here. - if floatHisto { - _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) - } - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, ts int64, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - require.Empty(t, res.Warnings) - - vector, err := res.Vector() - require.NoError(t, err) - - testutil.RequireEqual(t, exp, vector) - } - queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - require.Equal(t, expWarnings, res.Warnings) - } - - // sum(). 
- queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - queryString = `sum({idx="0"})` - var annos annotations.Annotations - annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13})) - queryAndCheckAnnotations(queryString, ts, annos) - - // + operator. - queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) - for idx := 1; idx < len(c.histograms); idx++ { - queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) - } - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - // count(). - queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) - - // avg(). - queryString = fmt.Sprintf("avg(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) - - offset := int64(len(c.histograms) - 1) - newTs := ts + offset*int64(time.Minute/time.Millisecond) - - // sum_over_time(). - queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}}) - - // avg_over_time(). - queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}}) - }) - idx0++ - } - } -} - -func TestNativeHistogram_SubOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram - }{ - { - histograms: []histogram.Histogram{ - { - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - Schema: 0, - Count: 11, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, -1}, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{3, -1}, - }, - }, - expected: histogram.FloatHistogram{ - Schema: 0, - Count: 30, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 4}, - }, - PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - {Offset: 1, Length: 1}, - {Offset: 4, Length: 3}, - }, - NegativeBuckets: []float64{1, 1, 7, 5, 5, 2}, - }, - }, - { - histograms: []histogram.Histogram{ - { - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - Schema: 1, - Count: 11, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, -1}, - NegativeSpans: []histogram.Span{ - 
{Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{3, -1}, - }, - }, - expected: histogram.FloatHistogram{ - Schema: 0, - Count: 30, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 1}, - {Offset: 1, Length: 5}, - }, - PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 4, Length: 3}, - }, - NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2}, - }, - }, - { - histograms: []histogram.Histogram{ - { - Schema: 1, - Count: 11, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, -1}, - NegativeSpans: []histogram.Span{ - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{3, -1}, - }, - { - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - }, - expected: histogram.FloatHistogram{ - Schema: 0, - Count: -30, - Sum: -1111.1, - ZeroThreshold: 0.001, - ZeroCount: -2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 1}, - {Offset: 1, Length: 5}, - }, - PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 4, Length: 3}, - }, - NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - engine := newTestEngine(t) - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := 
"sparse_histogram_series" - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - if len(vector) == len(exp) { - for i, e := range exp { - got := vector[i].H - if got != e.H { - // Error messages are better if we compare structs, not pointers. - require.Equal(t, *e.H, *got) - } - } - } - - testutil.RequireEqual(t, exp, vector) - } - - // - operator. 
- queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName) - for idx := 1; idx < len(c.histograms); idx++ { - queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx) - } - queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - }) - } - idx0++ - } -} - func TestQueryLookbackDelta(t *testing.T) { var ( load = `load 5m diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 0463384e2e..6be298cf7d 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1186,3 +1186,23 @@ eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} clear + +# Test native histograms with sub operator. +load 10m + histogram_sub_1{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sub_1{idx="1"} {{schema:0 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_2{idx="0"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sub_2{idx="1"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_3{idx="0"} {{schema:1 count:11 sum:1234.5 z_bucket:3 z_bucket_w:0.001 buckets:[0 2 1] n_buckets:[0 0 3 2]}}x1 + histogram_sub_3{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + +eval instant at 10m histogram_sub_1{idx="0"} - ignoring(idx) histogram_sub_1{idx="1"} + {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 1 0 2 1 1 1] n_buckets:[0 1 1 0 7 0 0 0 0 5 5 2]}} + 
+eval instant at 10m histogram_sub_2{idx="0"} - ignoring(idx) histogram_sub_2{idx="1"} + {} {{schema:0 count:30 sum:1111.1 z_bucket:2 z_bucket_w:0.001 buckets:[1 0 1 2 1 1 1] n_buckets:[0 -2 2 2 7 0 0 0 0 5 5 2]}} + +eval instant at 10m histogram_sub_3{idx="0"} - ignoring(idx) histogram_sub_3{idx="1"} + {} {{schema:0 count:-30 sum:-1111.1 z_bucket:-2 z_bucket_w:0.001 buckets:[-1 0 -1 -2 -1 -1 -1] n_buckets:[0 2 -2 -2 -7 0 0 0 0 -5 -5 -2]}} + +clear From d7c8177f56439ff2b5025a330481fb35786431a5 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Wed, 20 Nov 2024 15:46:11 +0000 Subject: [PATCH 0945/1397] docs: update README with v3 tagging update (#15361) docs: update README with v3 tagging update Signed-off-by: Fiona Liao --------- Signed-off-by: Fiona Liao --- README.md | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7528147b0e..63e5b13ba1 100644 --- a/README.md +++ b/README.md @@ -158,8 +158,19 @@ This is experimental. ### Prometheus code base In order to comply with [go mod](https://go.dev/ref/mod#versions) rules, -Prometheus release number do not exactly match Go module releases. For the -Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags. +Prometheus release number do not exactly match Go module releases. + +For the +Prometheus v3.y.z releases, we are publishing equivalent v0.3y.z tags. The y in v0.3y.z is always padded to two digits, with a leading zero if needed. + +Therefore, a user that would want to use Prometheus v3.0.0 as a library could do: + +```shell +go get github.com/prometheus/prometheus@v0.300.0 +``` + +For the +Prometheus v2.y.z releases, we published the equivalent v0.y.z tags. 
Therefore, a user that would want to use Prometheus v2.35.0 as a library could do: @@ -177,7 +188,7 @@ For more information on building, running, and developing on the React-based UI, ## More information -* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y. +* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v3.y.z will be displayed as v0.3y.z (the y in v0.3y.z is always padded to two digits, with a leading zero if needed), while v2.y.z will be displayed as v0.y.z. * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels. ## Contributing From 89bbb885e5a56ddb921d77a28797aae00b4903eb Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 20 Nov 2024 17:22:20 +0100 Subject: [PATCH 0946/1397] Upgrade to golangci-lint v1.62.0 (#15424) Signed-off-by: Arve Knudsen --- .github/workflows/ci.yml | 2 +- Makefile.common | 2 +- .../opentsdb/client_test.go | 6 +++--- model/labels/labels_test.go | 8 ++++---- promql/histogram_stats_iterator_test.go | 3 +-- scrape/scrape_test.go | 2 +- scripts/golangci-lint.yml | 2 +- storage/remote/codec_test.go | 2 +- storage/remote/intern_test.go | 13 ++++++------- storage/series_test.go | 15 +++++++-------- tsdb/index/index_test.go | 2 +- web/api/v1/api_test.go | 2 +- 12 files changed, 28 insertions(+), 31 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b7074a887e..1019ea4885 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -195,7 +195,7 @@ jobs: with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. 
- version: v1.61.0 + version: v1.62.0 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/Makefile.common b/Makefile.common index 09e5bff858..fc47bdbb21 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.61.0 +GOLANGCI_LINT_VERSION ?= v1.62.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go index a30448e766..bc9703c88c 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go @@ -43,14 +43,14 @@ func TestMarshalStoreSamplesRequest(t *testing.T) { Value: 3.1415, Tags: tagsFromMetric(metric), } - expectedJSON := []byte(`{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`) + expectedJSON := `{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}` resultingJSON, err := json.Marshal(request) require.NoError(t, err, "Marshal(request) resulted in err.") - require.Equal(t, expectedJSON, resultingJSON) + require.JSONEq(t, expectedJSON, string(resultingJSON)) var unmarshaledRequest StoreSamplesRequest - err = json.Unmarshal(expectedJSON, &unmarshaledRequest) + err = json.Unmarshal([]byte(expectedJSON), &unmarshaledRequest) require.NoError(t, err, "Unmarshal(expectedJSON, 
&unmarshaledRequest) resulted in err.") require.Equal(t, request, unmarshaledRequest) } diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 9208908311..3a4040776c 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -954,7 +954,7 @@ func TestMarshaling(t *testing.T) { expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}" b, err := json.Marshal(lbls) require.NoError(t, err) - require.Equal(t, expectedJSON, string(b)) + require.JSONEq(t, expectedJSON, string(b)) var gotJ Labels err = json.Unmarshal(b, &gotJ) @@ -964,7 +964,7 @@ func TestMarshaling(t *testing.T) { expectedYAML := "aaa: \"111\"\nbbb: \"2222\"\nccc: \"33333\"\n" b, err = yaml.Marshal(lbls) require.NoError(t, err) - require.Equal(t, expectedYAML, string(b)) + require.YAMLEq(t, expectedYAML, string(b)) var gotY Labels err = yaml.Unmarshal(b, &gotY) @@ -980,7 +980,7 @@ func TestMarshaling(t *testing.T) { b, err = json.Marshal(f) require.NoError(t, err) expectedJSONFromStruct := "{\"a_labels\":" + expectedJSON + "}" - require.Equal(t, expectedJSONFromStruct, string(b)) + require.JSONEq(t, expectedJSONFromStruct, string(b)) var gotFJ foo err = json.Unmarshal(b, &gotFJ) @@ -990,7 +990,7 @@ func TestMarshaling(t *testing.T) { b, err = yaml.Marshal(f) require.NoError(t, err) expectedYAMLFromStruct := "a_labels:\n aaa: \"111\"\n bbb: \"2222\"\n ccc: \"33333\"\n" - require.Equal(t, expectedYAMLFromStruct, string(b)) + require.YAMLEq(t, expectedYAMLFromStruct, string(b)) var gotFY foo err = yaml.Unmarshal(b, &gotFY) diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go index 7a2953d3e2..ea0e8b469f 100644 --- a/promql/histogram_stats_iterator_test.go +++ b/promql/histogram_stats_iterator_test.go @@ -14,7 +14,6 @@ package promql import ( - "fmt" "math" "testing" @@ -108,7 +107,7 @@ func TestHistogramStatsDecoding(t *testing.T) { decodedStats = append(decodedStats, h) } for i := 0; i < len(tc.histograms); i++ { 
- require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i)) + require.Equalf(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, "mismatch in counter reset hint for histogram %d", i) h := tc.histograms[i] if value.IsStaleNaN(h.Sum) { require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index ed40f52fa6..ce1686feca 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2609,7 +2609,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { accept := r.Header.Get("Accept") if allowUTF8 { - require.Truef(t, strings.Contains(accept, "escaping=allow-utf-8"), "Expected Accept header to allow utf8, got %q", accept) + require.Containsf(t, accept, "escaping=allow-utf-8", "Expected Accept header to allow utf8, got %q", accept) } if protobufParsing { require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 3051469937..e645ba30ae 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -36,4 +36,4 @@ jobs: uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: args: --verbose - version: v1.61.0 + version: v1.62.0 diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index c2fe6186ce..35cd4c4cd5 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -526,7 +526,7 @@ func TestFromQueryResultWithDuplicates(t *testing.T) { require.True(t, isErrSeriesSet, "Expected resulting series to be an errSeriesSet") errMessage := errSeries.Err().Error() - require.Equal(t, "duplicate label with name: foo", errMessage, fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage)) + require.Equalf(t, "duplicate label with name: foo", errMessage, "Expected error to be from 
duplicate label, but got: %s", errMessage) } func TestNegotiateResponseType(t *testing.T) { diff --git a/storage/remote/intern_test.go b/storage/remote/intern_test.go index 917c42e912..98e16ef80d 100644 --- a/storage/remote/intern_test.go +++ b/storage/remote/intern_test.go @@ -19,7 +19,6 @@ package remote import ( - "fmt" "testing" "time" @@ -33,7 +32,7 @@ func TestIntern(t *testing.T) { interned, ok := interner.pool[testString] require.True(t, ok) - require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) + require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load()) } func TestIntern_MultiRef(t *testing.T) { @@ -44,13 +43,13 @@ func TestIntern_MultiRef(t *testing.T) { interned, ok := interner.pool[testString] require.True(t, ok) - require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) + require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load()) interner.intern(testString) interned, ok = interner.pool[testString] require.True(t, ok) - require.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load())) + require.Equalf(t, int64(2), interned.refs.Load(), "expected refs to be 2 but it was %d", interned.refs.Load()) } func TestIntern_DeleteRef(t *testing.T) { @@ -61,7 +60,7 @@ func TestIntern_DeleteRef(t *testing.T) { interned, ok := interner.pool[testString] require.True(t, ok) - require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) + require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load()) interner.release(testString) _, ok = interner.pool[testString] @@ -75,7 +74,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { interner.intern(testString) interned, 
ok := interner.pool[testString] require.True(t, ok) - require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) + require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load()) go interner.release(testString) @@ -87,5 +86,5 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { interned, ok = interner.pool[testString] interner.mtx.RUnlock() require.True(t, ok) - require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) + require.Equalf(t, int64(1), interned.refs.Load(), "expected refs to be 1 but it was %d", interned.refs.Load()) } diff --git a/storage/series_test.go b/storage/series_test.go index f8ba2af67c..5309494069 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -14,7 +14,6 @@ package storage import ( - "fmt" "math" "testing" @@ -612,36 +611,36 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { encodedSample := encodedSamples[i] switch expectedSample := s.(type) { case hSample: - require.Equal(t, chunkenc.ValHistogram, encodedSample.Type(), "expect histogram", fmt.Sprintf("at idx %d", i)) + require.Equalf(t, chunkenc.ValHistogram, encodedSample.Type(), "expect histogram at idx %d", i) h := encodedSample.H() // Ignore counter reset if not gauge here, will check on chunk level. 
if expectedSample.h.CounterResetHint != histogram.GaugeType { h.CounterResetHint = histogram.UnknownCounterReset } if value.IsStaleNaN(expectedSample.h.Sum) { - require.True(t, value.IsStaleNaN(h.Sum), fmt.Sprintf("at idx %d", i)) + require.Truef(t, value.IsStaleNaN(h.Sum), "at idx %d", i) continue } - require.Equal(t, *expectedSample.h, *h, fmt.Sprintf("at idx %d", i)) + require.Equalf(t, *expectedSample.h, *h, "at idx %d", i) case fhSample: - require.Equal(t, chunkenc.ValFloatHistogram, encodedSample.Type(), "expect float histogram", fmt.Sprintf("at idx %d", i)) + require.Equalf(t, chunkenc.ValFloatHistogram, encodedSample.Type(), "expect float histogram at idx %d", i) fh := encodedSample.FH() // Ignore counter reset if not gauge here, will check on chunk level. if expectedSample.fh.CounterResetHint != histogram.GaugeType { fh.CounterResetHint = histogram.UnknownCounterReset } if value.IsStaleNaN(expectedSample.fh.Sum) { - require.True(t, value.IsStaleNaN(fh.Sum), fmt.Sprintf("at idx %d", i)) + require.Truef(t, value.IsStaleNaN(fh.Sum), "at idx %d", i) continue } - require.Equal(t, *expectedSample.fh, *fh, fmt.Sprintf("at idx %d", i)) + require.Equalf(t, *expectedSample.fh, *fh, "at idx %d", i) default: t.Error("internal error, unexpected type") } } for i, expectedCounterResetHint := range test.expectedCounterResetHeaders { - require.Equal(t, expectedCounterResetHint, getCounterResetHint(chks[i]), fmt.Sprintf("chunk at index %d", i)) + require.Equalf(t, expectedCounterResetHint, getCounterResetHint(chks[i]), "chunk at index %d", i) } } diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 0a45f3722a..ee186c1d95 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -331,7 +331,7 @@ func TestPostingsMany(t *testing.T) { exp = append(exp, e) } } - require.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in)) + require.Equalf(t, exp, got, "input: %v", c.in) } } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 
f5c81ebfa4..ad2434ee69 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -3862,7 +3862,7 @@ func TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) { require.Equal(t, http.StatusNotAcceptable, resp.StatusCode) require.Equal(t, "application/json", resp.Header.Get("Content-Type")) - require.Equal(t, `{"status":"error","errorType":"not_acceptable","error":"cannot encode response as application/default-format"}`, string(body)) + require.JSONEq(t, `{"status":"error","errorType":"not_acceptable","error":"cannot encode response as application/default-format"}`, string(body)) } func TestRespondError(t *testing.T) { From cc390aab64badd803ce68c6d7fd4299592120799 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 20 Nov 2024 17:52:20 +0100 Subject: [PATCH 0947/1397] MemPostings: allocate ListPostings once in PFLM (#15427) Instead of allocating ListPostings pointers one by one, allocate a slice and take pointers from that. It's faster, and also generates less garbage (NewListPostings is one of the top offenders in number of allocations). Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 43ab1b55a3..f211bade5f 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -430,15 +430,17 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, // Now `vals` only contains the values that matched, get their postings. its := make([]Postings, 0, len(vals)) + lps := make([]ListPostings, len(vals)) p.mtx.RLock() e := p.m[name] - for _, v := range vals { + for i, v := range vals { if refs, ok := e[v]; ok { // Some of the values may have been garbage-collected in the meantime this is fine, we'll just skip them. 
// If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere // because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels, // because the series were deleted already. - its = append(its, NewListPostings(refs)) + lps[i] = ListPostings{list: refs} + its = append(its, &lps[i]) } } // Let the mutex go before merging. From 125a90899c7f9f3d49902651cf5c1b9138e2497e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?= Date: Thu, 21 Nov 2024 14:20:38 +0100 Subject: [PATCH 0948/1397] promqltest: Complete the tests for info annotations (#15429) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit promqltest: Complete the tests for info annotations So far, we did not test for the _absence_ of an info annotation (because many tests triggered info annotations, which we haven't taken into account so far). The test for info annotations was also missed for range queries. This completes the tests for info annotations (and refactors the many `if` statements into a somewhat more compact `switch` statement). It fixes most tests to not emit an info annotation anymore. Or it changes the `eval` to `eval_info` where we actually want to test for the info annotation. It also fixes a few spelling errors in comments. 
--------- Signed-off-by: beorn7 Signed-off-by: Björn Rabenstein Co-authored-by: Arve Knudsen --- promql/engine_test.go | 30 +-- promql/promqltest/test.go | 23 +- promql/promqltest/testdata/aggregators.test | 12 +- promql/promqltest/testdata/at_modifier.test | 3 +- promql/promqltest/testdata/functions.test | 138 +++++------ promql/promqltest/testdata/histograms.test | 8 +- .../testdata/name_label_dropping.test | 96 ++++---- promql/promqltest/testdata/operators.test | 232 +++++++++--------- promql/promqltest/testdata/selectors.test | 156 ++++++------ promql/promqltest/testdata/subquery.test | 84 +++---- 10 files changed, 394 insertions(+), 388 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index a9a8924f85..bd8f77804a 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3494,51 +3494,51 @@ func TestEvaluationWithDelayedNameRemovalDisabled(t *testing.T) { promqltest.RunTest(t, ` load 5m - metric{env="1"} 0 60 120 + metric_total{env="1"} 0 60 120 another_metric{env="1"} 60 120 180 # Does not drop __name__ for vector selector -eval instant at 10m metric{env="1"} - metric{env="1"} 120 +eval instant at 10m metric_total{env="1"} + metric_total{env="1"} 120 # Drops __name__ for unary operators -eval instant at 10m -metric +eval instant at 10m -metric_total {env="1"} -120 # Drops __name__ for binary operators -eval instant at 10m metric + another_metric +eval instant at 10m metric_total + another_metric {env="1"} 300 # Does not drop __name__ for binary comparison operators -eval instant at 10m metric <= another_metric - metric{env="1"} 120 +eval instant at 10m metric_total <= another_metric + metric_total{env="1"} 120 # Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 10m metric <= bool another_metric +eval instant at 10m metric_total <= bool another_metric {env="1"} 1 # Drops __name__ for vector-scalar operations -eval instant at 10m metric * 2 +eval instant at 10m metric_total * 2 {env="1"} 240 # 
Drops __name__ for instant-vector functions -eval instant at 10m clamp(metric, 0, 100) +eval instant at 10m clamp(metric_total, 0, 100) {env="1"} 100 # Drops __name__ for round function -eval instant at 10m round(metric) +eval instant at 10m round(metric_total) {env="1"} 120 # Drops __name__ for range-vector functions -eval instant at 10m rate(metric{env="1"}[10m]) +eval instant at 10m rate(metric_total{env="1"}[10m]) {env="1"} 0.2 # Does not drop __name__ for last_over_time function -eval instant at 10m last_over_time(metric{env="1"}[10m]) - metric{env="1"} 120 +eval instant at 10m last_over_time(metric_total{env="1"}[10m]) + metric_total{env="1"} 120 # Drops name for other _over_time functions -eval instant at 10m max_over_time(metric{env="1"}[10m]) +eval instant at 10m max_over_time(metric_total{env="1"}[10m]) {env="1"} 120 `, engine) } diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 4cbd39b403..36519561ae 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -1097,12 +1097,16 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } - countWarnings, _ := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() + switch { + case !cmd.warn && countWarnings > 0: return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { + case cmd.warn && countWarnings == 0: return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + case !cmd.info && countInfo > 0: + return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) + case cmd.info && countInfo == 0: + return fmt.Errorf("expected info annotations 
evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } defer q.Close() @@ -1148,13 +1152,14 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { + switch { + case !cmd.warn && countWarnings > 0: return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { + case cmd.warn && countWarnings == 0: return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) - } - if cmd.info && countInfo == 0 { + case !cmd.info && countInfo > 0: + return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) + case cmd.info && countInfo == 0: return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) } err = cmd.compareResult(res.Value) diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 475fb8a569..00f393a865 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -624,11 +624,11 @@ eval_info instant at 0m stddev({label="c"}) eval_info instant at 0m stdvar({label="c"}) -eval instant at 0m stddev by (label) (series) +eval_info instant at 0m stddev by (label) (series) {label="a"} 0 {label="b"} 0 -eval instant at 0m stdvar by (label) (series) +eval_info instant at 0m stdvar by (label) (series) {label="a"} 0 {label="b"} 0 @@ -639,17 +639,17 @@ load 5m series{label="b"} 1 series{label="c"} 2 -eval instant at 0m stddev(series) +eval_info instant at 0m stddev(series) {} 0.5 -eval instant at 0m stdvar(series) +eval_info instant at 0m stdvar(series) {} 0.25 -eval instant at 0m stddev by (label) (series) +eval_info 
instant at 0m stddev by (label) (series) {label="b"} 0 {label="c"} 0 -eval instant at 0m stdvar by (label) (series) +eval_info instant at 0m stdvar by (label) (series) {label="b"} 0 {label="c"} 0 diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test index 4091f7eabf..1ad301bdb7 100644 --- a/promql/promqltest/testdata/at_modifier.test +++ b/promql/promqltest/testdata/at_modifier.test @@ -90,7 +90,8 @@ eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100) eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 {job="1"} 15 -eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") +# Note that this triggers an info annotation because we are rate'ing a metric that does not end in `_total`. +eval_info instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "") {job="1"} 0.3 eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 23c56565f5..e4caf0e363 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -83,13 +83,13 @@ clear # Tests for increase(). load 5m - http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+18x5 0+18x5 - http_requests{path="/dings"} 10+10x10 - http_requests{path="/bumms"} 1+10x10 + http_requests_total{path="/foo"} 0+10x10 + http_requests_total{path="/bar"} 0+18x5 0+18x5 + http_requests_total{path="/dings"} 10+10x10 + http_requests_total{path="/bumms"} 1+10x10 # Tests for increase(). 
-eval instant at 50m increase(http_requests[50m]) +eval instant at 50m increase(http_requests_total[50m]) {path="/foo"} 100 {path="/bar"} 160 {path="/dings"} 100 @@ -102,7 +102,7 @@ eval instant at 50m increase(http_requests[50m]) # chosen. However, "bumms" has value 1 at t=0 and would reach 0 at # t=-30s. Here the extrapolation to t=-2m30s would reach a negative # value, and therefore the extrapolation happens only by 30s. -eval instant at 50m increase(http_requests[100m]) +eval instant at 50m increase(http_requests_total[100m]) {path="/foo"} 100 {path="/bar"} 162 {path="/dings"} 105 @@ -115,57 +115,57 @@ clear # So the sequence 3 2 (decreasing counter = reset) is interpreted the same as 3 0 1 2. # Prometheus assumes it missed the intermediate values 0 and 1. load 5m - http_requests{path="/foo"} 0 1 2 3 2 3 4 + http_requests_total{path="/foo"} 0 1 2 3 2 3 4 -eval instant at 30m increase(http_requests[30m]) +eval instant at 30m increase(http_requests_total[30m]) {path="/foo"} 7 clear # Tests for rate(). load 5m - testcounter_reset_middle 0+27x4 0+27x5 - testcounter_reset_end 0+10x9 0 10 + testcounter_reset_middle_total 0+27x4 0+27x5 + testcounter_reset_end_total 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). -eval instant at 50m rate(testcounter_reset_middle[50m]) +eval instant at 50m rate(testcounter_reset_middle_total[50m]) {} 0.08 # Counter resets at end of range are ignored by rate(). -eval instant at 50m rate(testcounter_reset_end[5m]) +eval instant at 50m rate(testcounter_reset_end_total[5m]) -eval instant at 50m rate(testcounter_reset_end[6m]) +eval instant at 50m rate(testcounter_reset_end_total[6m]) {} 0 clear load 5m - calculate_rate_offset{x="a"} 0+10x10 - calculate_rate_offset{x="b"} 0+20x10 - calculate_rate_window 0+80x10 + calculate_rate_offset_total{x="a"} 0+10x10 + calculate_rate_offset_total{x="b"} 0+20x10 + calculate_rate_window_total 0+80x10 # Rates should calculate per-second rates. 
-eval instant at 50m rate(calculate_rate_window[50m]) +eval instant at 50m rate(calculate_rate_window_total[50m]) {} 0.26666666666666666 -eval instant at 50m rate(calculate_rate_offset[10m] offset 5m) +eval instant at 50m rate(calculate_rate_offset_total[10m] offset 5m) {x="a"} 0.03333333333333333 {x="b"} 0.06666666666666667 clear load 4m - testcounter_zero_cutoff{start="0m"} 0+240x10 - testcounter_zero_cutoff{start="1m"} 60+240x10 - testcounter_zero_cutoff{start="2m"} 120+240x10 - testcounter_zero_cutoff{start="3m"} 180+240x10 - testcounter_zero_cutoff{start="4m"} 240+240x10 - testcounter_zero_cutoff{start="5m"} 300+240x10 + testcounter_zero_cutoff_total{start="0m"} 0+240x10 + testcounter_zero_cutoff_total{start="1m"} 60+240x10 + testcounter_zero_cutoff_total{start="2m"} 120+240x10 + testcounter_zero_cutoff_total{start="3m"} 180+240x10 + testcounter_zero_cutoff_total{start="4m"} 240+240x10 + testcounter_zero_cutoff_total{start="5m"} 300+240x10 # Zero cutoff for left-side extrapolation happens until we # reach half a sampling interval (2m). Beyond that, we only # extrapolate by half a sampling interval. -eval instant at 10m rate(testcounter_zero_cutoff[20m]) +eval instant at 10m rate(testcounter_zero_cutoff_total[20m]) {start="0m"} 0.5 {start="1m"} 0.55 {start="2m"} 0.6 @@ -174,7 +174,7 @@ eval instant at 10m rate(testcounter_zero_cutoff[20m]) {start="5m"} 0.6 # Normal half-interval cutoff for left-side extrapolation. -eval instant at 50m rate(testcounter_zero_cutoff[20m]) +eval instant at 50m rate(testcounter_zero_cutoff_total[20m]) {start="0m"} 0.6 {start="1m"} 0.6 {start="2m"} 0.6 @@ -186,15 +186,15 @@ clear # Tests for irate(). 
load 5m - http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests_total{path="/foo"} 0+10x10 + http_requests_total{path="/bar"} 0+10x5 0+10x5 -eval instant at 50m irate(http_requests[50m]) +eval instant at 50m irate(http_requests_total[50m]) {path="/foo"} .03333333333333333333 {path="/bar"} .03333333333333333333 # Counter reset. -eval instant at 30m irate(http_requests[50m]) +eval instant at 30m irate(http_requests_total[50m]) {path="/foo"} .03333333333333333333 {path="/bar"} 0 @@ -224,18 +224,18 @@ clear # Tests for deriv() and predict_linear(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + testcounter_reset_middle_total 0+10x4 0+10x5 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 # deriv should return the same as rate in simple cases. -eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[50m]) +eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval instant at 50m deriv(http_requests{group="canary", instance="1", job="app-server"}[50m]) +eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 # deriv should return correct result. -eval instant at 50m deriv(testcounter_reset_middle[100m]) +eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 # predict_linear should return correct result. 
@@ -252,31 +252,31 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=0: 6.818181818181818 # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) +eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600) {} 70 -eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) +eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h) {} 70 # intercept at t = 3000+3600 = 6600 -eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) +eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h) {} 76.81818181818181 # intercept at t = 600+3600 = 4200 -eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 -eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) +eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 89.54545454545455 -# With http_requests, there is a sample value exactly at the end of +# With http_requests_total, there is a sample value exactly at the end of # the range, and it has exactly the predicted value, so predict_linear # can be emulated with deriv. 
-eval instant at 50m predict_linear(http_requests[50m], 3600) - (http_requests + deriv(http_requests[50m]) * 3600) +eval instant at 50m predict_linear(http_requests_total[50m], 3600) - (http_requests_total + deriv(http_requests_total[50m]) * 3600) {group="canary", instance="1", job="app-server"} 0 clear @@ -1073,49 +1073,49 @@ eval instant at 50m absent(rate(nonexistant[5m])) clear # Testdata for absent_over_time() -eval instant at 1m absent_over_time(http_requests[5m]) +eval instant at 1m absent_over_time(http_requests_total[5m]) {} 1 -eval instant at 1m absent_over_time(http_requests{handler="/foo"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler="/foo"}[5m]) {handler="/foo"} 1 -eval instant at 1m absent_over_time(http_requests{handler!="/foo"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler!="/foo"}[5m]) {} 1 -eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) {} 1 eval instant at 1m absent_over_time(rate(nonexistant[5m])[5m:]) {} 1 -eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) +eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) {instance="127.0.0.1"} 1 load 1m - http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 - http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN -eval instant at 5m absent_over_time(http_requests[5m]) +eval instant at 5m 
absent_over_time(http_requests_total[5m]) -eval instant at 5m absent_over_time(rate(http_requests[5m])[5m:1m]) +eval instant at 5m absent_over_time(rate(http_requests_total[5m])[5m:1m]) eval instant at 0m absent_over_time(httpd_log_lines_total[30s]) eval instant at 1m absent_over_time(httpd_log_lines_total[30s]) {} 1 -eval instant at 15m absent_over_time(http_requests[5m]) +eval instant at 15m absent_over_time(http_requests_total[5m]) {} 1 -eval instant at 15m absent_over_time(http_requests[10m]) +eval instant at 15m absent_over_time(http_requests_total[10m]) -eval instant at 16m absent_over_time(http_requests[6m]) +eval instant at 16m absent_over_time(http_requests_total[6m]) {} 1 -eval instant at 16m absent_over_time(http_requests[16m]) +eval instant at 16m absent_over_time(http_requests_total[16m]) eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) {} 1 @@ -1143,30 +1143,30 @@ eval instant at 10m absent_over_time({job="ingress"}[4m]) clear # Testdata for present_over_time() -eval instant at 1m present_over_time(http_requests[5m]) +eval instant at 1m present_over_time(http_requests_total[5m]) -eval instant at 1m present_over_time(http_requests{handler="/foo"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo"}[5m]) -eval instant at 1m present_over_time(http_requests{handler!="/foo"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler!="/foo"}[5m]) -eval instant at 1m present_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) eval instant at 1m present_over_time(rate(nonexistant[5m])[5m:]) -eval instant at 1m present_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) +eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) load 1m - 
http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 - http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10 + http_requests_total{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10 httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN -eval instant at 5m present_over_time(http_requests[5m]) +eval instant at 5m present_over_time(http_requests_total[5m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 5m present_over_time(rate(http_requests[5m])[5m:1m]) +eval instant at 5m present_over_time(rate(http_requests_total[5m])[5m:1m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 @@ -1175,15 +1175,15 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) -eval instant at 15m present_over_time(http_requests[5m]) +eval instant at 15m present_over_time(http_requests_total[5m]) -eval instant at 15m present_over_time(http_requests[10m]) +eval instant at 15m present_over_time(http_requests_total[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[6m]) +eval instant at 16m present_over_time(http_requests_total[6m]) -eval instant at 16m present_over_time(http_requests[16m]) +eval instant at 16m present_over_time(http_requests_total[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 6089fd01d2..8ab23640af 100644 --- a/promql/promqltest/testdata/histograms.test +++ 
b/promql/promqltest/testdata/histograms.test @@ -452,14 +452,14 @@ load 5m nonmonotonic_bucket{le="1000"} 0+9x10 nonmonotonic_bucket{le="+Inf"} 0+8x10 -# Nonmonotonic buckets -eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) +# Nonmonotonic buckets, triggering an info annotation. +eval_info instant at 50m histogram_quantile(0.01, nonmonotonic_bucket) {} 0.0045 -eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) +eval_info instant at 50m histogram_quantile(0.5, nonmonotonic_bucket) {} 8.5 -eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) +eval_info instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. diff --git a/promql/promqltest/testdata/name_label_dropping.test b/promql/promqltest/testdata/name_label_dropping.test index d4a2ad257e..9af45a7324 100644 --- a/promql/promqltest/testdata/name_label_dropping.test +++ b/promql/promqltest/testdata/name_label_dropping.test @@ -1,88 +1,88 @@ # Test for __name__ label drop. load 5m - metric{env="1"} 0 60 120 - another_metric{env="1"} 60 120 180 + metric_total{env="1"} 0 60 120 + another_metric_total{env="1"} 60 120 180 -# Does not drop __name__ for vector selector -eval instant at 10m metric{env="1"} - metric{env="1"} 120 +# Does not drop __name__ for vector selector. +eval instant at 10m metric_total{env="1"} + metric_total{env="1"} 120 -# Drops __name__ for unary operators -eval instant at 10m -metric +# Drops __name__ for unary operators. +eval instant at 10m -metric_total {env="1"} -120 -# Drops __name__ for binary operators -eval instant at 10m metric + another_metric +# Drops __name__ for binary operators. +eval instant at 10m metric_total + another_metric_total {env="1"} 300 -# Does not drop __name__ for binary comparison operators -eval instant at 10m metric <= another_metric - metric{env="1"} 120 +# Does not drop __name__ for binary comparison operators. 
+eval instant at 10m metric_total <= another_metric_total + metric_total{env="1"} 120 -# Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 10m metric <= bool another_metric +# Drops __name__ for binary comparison operators with "bool" modifier. +eval instant at 10m metric_total <= bool another_metric_total {env="1"} 1 -# Drops __name__ for vector-scalar operations -eval instant at 10m metric * 2 +# Drops __name__ for vector-scalar operations. +eval instant at 10m metric_total * 2 {env="1"} 240 -# Drops __name__ for instant-vector functions -eval instant at 10m clamp(metric, 0, 100) +# Drops __name__ for instant-vector functions. +eval instant at 10m clamp(metric_total, 0, 100) {env="1"} 100 -# Drops __name__ for round function -eval instant at 10m round(metric) +# Drops __name__ for round function. +eval instant at 10m round(metric_total) {env="1"} 120 -# Drops __name__ for range-vector functions -eval instant at 10m rate(metric{env="1"}[10m]) +# Drops __name__ for range-vector functions. +eval instant at 10m rate(metric_total{env="1"}[10m]) {env="1"} 0.2 -# Does not drop __name__ for last_over_time function -eval instant at 10m last_over_time(metric{env="1"}[10m]) - metric{env="1"} 120 +# Does not drop __name__ for last_over_time function. +eval instant at 10m last_over_time(metric_total{env="1"}[10m]) + metric_total{env="1"} 120 -# Drops name for other _over_time functions -eval instant at 10m max_over_time(metric{env="1"}[10m]) +# Drops name for other _over_time functions. +eval instant at 10m max_over_time(metric_total{env="1"}[10m]) {env="1"} 120 -# Allows relabeling (to-be-dropped) __name__ via label_replace +# Allows relabeling (to-be-dropped) __name__ via label_replace. 
eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") - {my_name="rate_metric", env="1"} 0.2 - {my_name="rate_another_metric", env="1"} 0.2 + {my_name="rate_metric_total", env="1"} 0.2 + {my_name="rate_another_metric_total", env="1"} 0.2 -# Allows preserving __name__ via label_replace +# Allows preserving __name__ via label_replace. eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") - rate_metric{env="1"} 0.2 - rate_another_metric{env="1"} 0.2 + rate_metric_total{env="1"} 0.2 + rate_another_metric_total{env="1"} 0.2 -# Allows relabeling (to-be-dropped) __name__ via label_join +# Allows relabeling (to-be-dropped) __name__ via label_join. eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") - {my_name="metric", env="1"} 0.2 - {my_name="another_metric", env="1"} 0.2 + {my_name="metric_total", env="1"} 0.2 + {my_name="another_metric_total", env="1"} 0.2 -# Allows preserving __name__ via label_join +# Allows preserving __name__ via label_join. eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") - metric_1{env="1"} 0.2 - another_metric_1{env="1"} 0.2 + metric_total_1{env="1"} 0.2 + another_metric_total_1{env="1"} 0.2 -# Does not drop metric names fro aggregation operators -eval instant at 10m sum by (__name__, env) (metric{env="1"}) - metric{env="1"} 120 +# Does not drop metric names from aggregation operators. +eval instant at 10m sum by (__name__, env) (metric_total{env="1"}) + metric_total{env="1"} 120 -# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) +# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label). 
# This is an accidental side effect of delayed __name__ label dropping eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) -# Aggregation operators aggregate metrics with same labelset and to-be-dropped names +# Aggregation operators aggregate metrics with same labelset and to-be-dropped names. # This is an accidental side effect of delayed __name__ label dropping eval instant at 10m sum(rate({env="1"}[10m])) by (env) {env="1"} 0.4 -# Aggregationk operators propagate __name__ label dropping information -eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) - metric{env="1"} 120 +# Aggregation operators propagate __name__ label dropping information. +eval instant at 10m topk(10, sum by (__name__, env) (metric_total{env="1"})) + metric_total{env="1"} 120 -eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +eval instant at 10m topk(10, sum by (__name__, env) (rate(metric_total{env="1"}[10m]))) {env="1"} 0.2 diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test index ad908a228a..667989ca77 100644 --- a/promql/promqltest/testdata/operators.test +++ b/promql/promqltest/testdata/operators.test @@ -1,12 +1,12 @@ load 5m - http_requests{job="api-server", instance="0", group="production"} 0+10x10 - http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="0", group="canary"} 0+30x10 - http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="app-server", instance="0", group="production"} 0+50x10 - http_requests{job="app-server", instance="1", group="production"} 0+60x10 - http_requests{job="app-server", instance="0", group="canary"} 0+70x10 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x10 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x10 + 
http_requests_total{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests_total{job="app-server", instance="0", group="production"} 0+50x10 + http_requests_total{job="app-server", instance="1", group="production"} 0+60x10 + http_requests_total{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}x11 load 5m @@ -15,21 +15,21 @@ load 5m vector_matching_b{l="x"} 0+4x25 -eval instant at 50m SUM(http_requests) BY (job) - COUNT(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) - COUNT(http_requests_total) BY (job) {job="api-server"} 996 {job="app-server"} 2596 -eval instant at 50m 2 - SUM(http_requests) BY (job) +eval instant at 50m 2 - SUM(http_requests_total) BY (job) {job="api-server"} -998 {job="app-server"} -2598 -eval instant at 50m -http_requests{job="api-server",instance="0",group="production"} +eval instant at 50m -http_requests_total{job="api-server",instance="0",group="production"} {job="api-server",instance="0",group="production"} -100 -eval instant at 50m +http_requests{job="api-server",instance="0",group="production"} - http_requests{job="api-server",instance="0",group="production"} 100 +eval instant at 50m +http_requests_total{job="api-server",instance="0",group="production"} + http_requests_total{job="api-server",instance="0",group="production"} 100 -eval instant at 50m - - - SUM(http_requests) BY (job) +eval instant at 50m - - - SUM(http_requests_total) BY (job) {job="api-server"} -1000 {job="app-server"} -2600 @@ -42,83 +42,83 @@ eval instant at 50m -2^---1*3 eval instant at 50m 2/-2^---1*3+2 -10 -eval instant at 50m -10^3 * - SUM(http_requests) BY (job) ^ -1 +eval instant at 50m -10^3 * - SUM(http_requests_total) BY (job) ^ -1 
{job="api-server"} 1 {job="app-server"} 0.38461538461538464 -eval instant at 50m 1000 / SUM(http_requests) BY (job) +eval instant at 50m 1000 / SUM(http_requests_total) BY (job) {job="api-server"} 1 {job="app-server"} 0.38461538461538464 -eval instant at 50m SUM(http_requests) BY (job) - 2 +eval instant at 50m SUM(http_requests_total) BY (job) - 2 {job="api-server"} 998 {job="app-server"} 2598 -eval instant at 50m SUM(http_requests) BY (job) % 3 +eval instant at 50m SUM(http_requests_total) BY (job) % 3 {job="api-server"} 1 {job="app-server"} 2 -eval instant at 50m SUM(http_requests) BY (job) % 0.3 +eval instant at 50m SUM(http_requests_total) BY (job) % 0.3 {job="api-server"} 0.1 {job="app-server"} 0.2 -eval instant at 50m SUM(http_requests) BY (job) ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) ^ 2 {job="api-server"} 1000000 {job="app-server"} 6760000 -eval instant at 50m SUM(http_requests) BY (job) % 3 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 3 ^ 2 {job="api-server"} 1 {job="app-server"} 8 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ (3 ^ 2) +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ (3 ^ 2) {job="api-server"} 488 {job="app-server"} 40 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ 3 ^ 2 {job="api-server"} 488 {job="app-server"} 40 -eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 ^ 2 +eval instant at 50m SUM(http_requests_total) BY (job) % 2 ^ 3 ^ 2 ^ 2 {job="api-server"} 1000 {job="app-server"} 2600 -eval instant at 50m COUNT(http_requests) BY (job) ^ COUNT(http_requests) BY (job) +eval instant at 50m COUNT(http_requests_total) BY (job) ^ COUNT(http_requests_total) BY (job) {job="api-server"} 256 {job="app-server"} 256 -eval instant at 50m SUM(http_requests) BY (job) / 0 +eval instant at 50m SUM(http_requests_total) BY (job) / 0 {job="api-server"} +Inf {job="app-server"} +Inf -eval instant at 50m 
http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} +Inf -eval instant at 50m -1 * http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m -1 * http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} -Inf -eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} / 0 +eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} / 0 {group="canary", instance="0", job="api-server"} NaN -eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} % 0 +eval instant at 50m 0 * http_requests_total{group="canary", instance="0", job="api-server"} % 0 {group="canary", instance="0", job="api-server"} NaN -eval instant at 50m SUM(http_requests) BY (job) + SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) + SUM(http_requests_total) BY (job) {job="api-server"} 2000 {job="app-server"} 5200 -eval instant at 50m (SUM((http_requests)) BY (job)) + SUM(http_requests) BY (job) +eval instant at 50m (SUM((http_requests_total)) BY (job)) + SUM(http_requests_total) BY (job) {job="api-server"} 2000 {job="app-server"} 5200 -eval instant at 50m http_requests{job="api-server", group="canary"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="1", job="api-server"} 400 +eval instant at 50m http_requests_total{job="api-server", group="canary"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 +eval instant at 50m http_requests_total{job="api-server", 
group="canary"} + rate(http_requests_total{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 -eval instant at 50m rate(http_requests[25m]) * 25 * 60 +eval instant at 50m rate(http_requests_total[25m]) * 25 * 60 {group="canary", instance="0", job="api-server"} 150 {group="canary", instance="0", job="app-server"} 350 {group="canary", instance="1", job="api-server"} 200 @@ -128,7 +128,7 @@ eval instant at 50m rate(http_requests[25m]) * 25 * 60 {group="production", instance="1", job="api-server"} 100 {group="production", instance="1", job="app-server"} 300 -eval instant at 50m (rate((http_requests[25m])) * 25) * 60 +eval instant at 50m (rate((http_requests_total[25m])) * 25) * 60 {group="canary", instance="0", job="api-server"} 150 {group="canary", instance="0", job="app-server"} 350 {group="canary", instance="1", job="api-server"} 200 @@ -139,53 +139,53 @@ eval instant at 50m (rate((http_requests[25m])) * 25) * 60 {group="production", instance="1", job="app-server"} 300 -eval instant at 50m http_requests{group="canary"} and http_requests{instance="0"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 +eval instant at 50m http_requests_total{group="canary"} and http_requests_total{instance="0"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 -eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and http_requests_total{instance="0"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"} +eval instant at 50m 
(http_requests_total{group="canary"} + 1) and on(instance, job) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and on(instance) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and ignoring(group) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group, job) http_requests{instance="0", group="production"} +eval instant at 50m (http_requests_total{group="canary"} + 1) and ignoring(group, job) http_requests_total{instance="0", group="production"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 -eval instant at 50m http_requests{group="canary"} or http_requests{group="production"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 
50m http_requests_total{group="canary"} or http_requests_total{group="production"} + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # On overlap the rhs samples must be dropped. -eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"} +eval instant at 50m (http_requests_total{group="canary"} + 1) or http_requests_total{instance="1"} {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 {group="canary", instance="1", job="app-server"} 801 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # Matching only on instance excludes everything that has instance=0/1 but includes # entries without the instance label. 
-eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a) +eval instant at 50m (http_requests_total{group="canary"} + 1) or on(instance) (http_requests_total or cpu_count or vector_matching_a) {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 @@ -193,7 +193,7 @@ eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_re vector_matching_a{l="x"} 10 vector_matching_a{l="y"} 20 -eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a) +eval instant at 50m (http_requests_total{group="canary"} + 1) or ignoring(l, group, job) (http_requests_total or cpu_count or vector_matching_a) {group="canary", instance="0", job="api-server"} 301 {group="canary", instance="0", job="app-server"} 701 {group="canary", instance="1", job="api-server"} 401 @@ -201,81 +201,81 @@ eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, jo vector_matching_a{l="x"} 10 vector_matching_a{l="y"} 20 -eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} unless on(job) http_requests{instance="0"} +eval instant at 50m http_requests_total{group="canary"} unless on(job) http_requests_total{instance="0"} -eval instant at 50m http_requests{group="canary"} unless on(job, instance) http_requests{instance="0"} - http_requests{group="canary", instance="1", 
job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless on(job, instance) http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} / on(instance,job) http_requests{group="production"} +eval instant at 50m http_requests_total{group="canary"} / on(instance,job) http_requests_total{group="production"} {instance="0", job="api-server"} 3 {instance="0", job="app-server"} 1.4 {instance="1", job="api-server"} 2 {instance="1", job="app-server"} 1.3333333333333333 -eval instant at 50m http_requests{group="canary"} unless ignoring(group, instance) http_requests{instance="0"} +eval instant at 50m http_requests_total{group="canary"} unless ignoring(group, instance) http_requests_total{instance="0"} -eval instant at 50m http_requests{group="canary"} unless ignoring(group) http_requests{instance="0"} - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 +eval instant at 50m http_requests_total{group="canary"} unless ignoring(group) http_requests_total{instance="0"} + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 -eval instant at 50m http_requests{group="canary"} / ignoring(group) http_requests{group="production"} +eval instant at 50m http_requests_total{group="canary"} / ignoring(group) http_requests_total{group="production"} {instance="0", job="api-server"} 3 {instance="0", job="app-server"} 1.4 {instance="1", job="api-server"} 2 {instance="1", job="app-server"} 1.3333333333333333 # https://github.com/prometheus/prometheus/issues/1489 -eval instant at 50m http_requests AND ON (dummy) vector(1) - http_requests{group="canary", 
instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 50m http_requests_total AND ON (dummy) vector(1) + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 -eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1) - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 +eval instant at 50m http_requests_total AND IGNORING (group, instance, job) vector(1) + http_requests_total{group="canary", instance="0", job="api-server"} 
300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 # Comparisons. -eval instant at 50m SUM(http_requests) BY (job) > 1000 +eval instant at 50m SUM(http_requests_total) BY (job) > 1000 {job="app-server"} 2600 -eval instant at 50m 1000 < SUM(http_requests) BY (job) +eval instant at 50m 1000 < SUM(http_requests_total) BY (job) {job="app-server"} 2600 -eval instant at 50m SUM(http_requests) BY (job) <= 1000 +eval instant at 50m SUM(http_requests_total) BY (job) <= 1000 {job="api-server"} 1000 -eval instant at 50m SUM(http_requests) BY (job) != 1000 +eval instant at 50m SUM(http_requests_total) BY (job) != 1000 {job="app-server"} 2600 -eval instant at 50m SUM(http_requests) BY (job) == 1000 +eval instant at 50m SUM(http_requests_total) BY (job) == 1000 {job="api-server"} 1000 -eval instant at 50m SUM(http_requests) BY (job) == bool 1000 +eval instant at 50m SUM(http_requests_total) BY (job) == bool 1000 {job="api-server"} 1 {job="app-server"} 0 -eval instant at 50m SUM(http_requests) BY (job) == bool SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) == bool SUM(http_requests_total) BY (job) {job="api-server"} 1 {job="app-server"} 1 -eval instant at 50m SUM(http_requests) BY (job) != bool SUM(http_requests) BY (job) +eval instant at 50m SUM(http_requests_total) BY (job) != bool SUM(http_requests_total) BY (job) {job="api-server"} 0 {job="app-server"} 0 @@ -285,12 +285,12 @@ eval instant at 50m 0 == bool 1 eval instant at 50m 1 == bool 1 1 -eval instant at 50m 
http_requests{job="api-server", instance="0", group="production"} == bool 100 +eval instant at 50m http_requests_total{job="api-server", instance="0", group="production"} == bool 100 {job="api-server", instance="0", group="production"} 1 # The histogram is ignored here so the result doesn't change but it has an info annotation now. eval_info instant at 5m {job="app-server"} == 80 - http_requests{group="canary", instance="1", job="app-server"} 80 + http_requests_total{group="canary", instance="1", job="app-server"} 80 eval_info instant at 5m http_requests_histogram != 80 @@ -673,7 +673,7 @@ eval_info range from 0 to 24m step 6m left_histograms == 0 eval_info range from 0 to 24m step 6m left_histograms != 3 # No results. -eval range from 0 to 24m step 6m left_histograms != 0 +eval_info range from 0 to 24m step 6m left_histograms != 0 # No results. eval_info range from 0 to 24m step 6m left_histograms > 3 @@ -682,7 +682,7 @@ eval_info range from 0 to 24m step 6m left_histograms > 3 eval_info range from 0 to 24m step 6m left_histograms > 0 # No results. -eval range from 0 to 24m step 6m left_histograms >= 3 +eval_info range from 0 to 24m step 6m left_histograms >= 3 # No results. eval_info range from 0 to 24m step 6m left_histograms >= 0 @@ -697,7 +697,7 @@ eval_info range from 0 to 24m step 6m left_histograms < 0 eval_info range from 0 to 24m step 6m left_histograms <= 3 # No results. -eval range from 0 to 24m step 6m left_histograms <= 0 +eval_info range from 0 to 24m step 6m left_histograms <= 0 # No results. eval_info range from 0 to 24m step 6m left_histograms == bool 3 @@ -770,40 +770,40 @@ eval range from 0 to 60m step 6m NaN == left_floats eval range from 0 to 60m step 6m NaN == bool left_floats {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m 3 == left_histograms +eval_info range from 0 to 24m step 6m 3 == left_histograms # No results. 
-eval range from 0 to 24m step 6m 0 == left_histograms +eval_info range from 0 to 24m step 6m 0 == left_histograms # No results. -eval range from 0 to 24m step 6m 3 != left_histograms +eval_info range from 0 to 24m step 6m 3 != left_histograms # No results. -eval range from 0 to 24m step 6m 0 != left_histograms +eval_info range from 0 to 24m step 6m 0 != left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 < left_histograms +eval_info range from 0 to 24m step 6m 3 < left_histograms # No results. -eval range from 0 to 24m step 6m 0 < left_histograms +eval_info range from 0 to 24m step 6m 0 < left_histograms # No results. -eval range from 0 to 24m step 6m 3 > left_histograms +eval_info range from 0 to 24m step 6m 3 > left_histograms # No results. -eval range from 0 to 24m step 6m 0 > left_histograms +eval_info range from 0 to 24m step 6m 0 > left_histograms # No results. -eval range from 0 to 24m step 6m 3 >= left_histograms +eval_info range from 0 to 24m step 6m 3 >= left_histograms # No results. -eval range from 0 to 24m step 6m 0 >= left_histograms +eval_info range from 0 to 24m step 6m 0 >= left_histograms # No results. 
clear diff --git a/promql/promqltest/testdata/selectors.test b/promql/promqltest/testdata/selectors.test index 6742d83e99..3a1f5263dd 100644 --- a/promql/promqltest/testdata/selectors.test +++ b/promql/promqltest/testdata/selectors.test @@ -1,109 +1,109 @@ load 10s - http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 - http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 - http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 - http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s rate(http_requests[1m]) +eval instant at 8000s rate(http_requests_total[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests[1m]) +eval instant at 18000s rate(http_requests_total[1m]) {job="api-server", instance="0", group="production"} 3 {job="api-server", instance="1", group="production"} 3 {job="api-server", instance="0", group="canary"} 8 {job="api-server", instance="1", group="canary"} 4 -eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"pro.*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 18000s rate(http_requests{group=~".*ry", instance="1"}[1m]) +eval instant at 18000s rate(http_requests_total{group=~".*ry", instance="1"}[1m]) 
{job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests{instance!="3"}[1m] offset 10000s) +eval instant at 18000s rate(http_requests_total{instance!="3"}[1m] offset 10000s) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 4000s rate(http_requests{instance!="3"}[1m] offset -4000s) +eval instant at 4000s rate(http_requests_total{instance!="3"}[1m] offset -4000s) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 {job="api-server", instance="1", group="canary"} 4 -eval instant at 18000s rate(http_requests[40s]) - rate(http_requests[1m] offset 10000s) +eval instant at 18000s rate(http_requests_total[40s]) - rate(http_requests_total[1m] offset 10000s) {job="api-server", instance="0", group="production"} 2 {job="api-server", instance="1", group="production"} 1 {job="api-server", instance="0", group="canary"} 5 {job="api-server", instance="1", group="canary"} 0 # https://github.com/prometheus/prometheus/issues/3575 -eval instant at 0s http_requests{foo!="bar"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 +eval instant at 0s http_requests_total{foo!="bar"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 -eval instant at 0s http_requests{foo!="bar", job="api-server"} - 
http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 +eval instant at 0s http_requests_total{foo!="bar", job="api-server"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 -eval instant at 0s http_requests{foo!~"bar", job="api-server"} - http_requests{job="api-server", instance="0", group="production"} 0 - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="0", group="canary"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 +eval instant at 0s http_requests_total{foo!~"bar", job="api-server"} + http_requests_total{job="api-server", instance="0", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="0", group="canary"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 -eval instant at 0s http_requests{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""} - http_requests{job="api-server", instance="1", group="production"} 0 - http_requests{job="api-server", instance="1", group="canary"} 0 +eval instant at 0s http_requests_total{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""} + http_requests_total{job="api-server", instance="1", group="production"} 0 + http_requests_total{job="api-server", instance="1", group="canary"} 0 # https://github.com/prometheus/prometheus/issues/7994 -eval instant at 8000s rate(http_requests{group=~"(?i:PRO).*"}[1m]) +eval instant at 8000s 
rate(http_requests_total{group=~"(?i:PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*?(?i:PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*?(?i:PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:DUC).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:DUC).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:TION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:TION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:TION).*?"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:TION).*?"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~"((?i)PRO).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"((?i)PRO).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*((?i)DUC).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*((?i)DUC).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*((?i)TION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*((?i)TION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s 
rate(http_requests{group=~"(?i:PRODUCTION)"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~"(?i:PRODUCTION)"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 8000s rate(http_requests{group=~".*(?i:C).*"}[1m]) +eval instant at 8000s rate(http_requests_total{group=~".*(?i:C).*"}[1m]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 {job="api-server", instance="0", group="canary"} 3 @@ -133,14 +133,14 @@ load 5m label_grouping_test{a="a", b="abb"} 0+20x10 load 5m - http_requests{job="api-server", instance="0", group="production"} 0+10x10 - http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="0", group="canary"} 0+30x10 - http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="app-server", instance="0", group="production"} 0+50x10 - http_requests{job="app-server", instance="1", group="production"} 0+60x10 - http_requests{job="app-server", instance="0", group="canary"} 0+70x10 - http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x10 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x10 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests_total{job="app-server", instance="0", group="production"} 0+50x10 + http_requests_total{job="app-server", instance="1", group="production"} 0+60x10 + http_requests_total{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 # Single-letter label names and values. 
eval instant at 50m x{y="testvalue"} @@ -148,14 +148,14 @@ eval instant at 50m x{y="testvalue"} # Basic Regex eval instant at 50m {__name__=~".+"} - http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="canary", instance="0", job="api-server"} 300 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="api-server"} 400 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="1", job="app-server"} 600 x{y="testvalue"} 100 label_grouping_test{a="a", b="abb"} 200 label_grouping_test{a="aa", b="bb"} 100 @@ -164,34 +164,34 @@ eval instant at 50m {__name__=~".+"} cpu_count{instance="0", type="numa"} 300 eval instant at 50m {job=~".+-server", job!~"api-.+"} - http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="canary", instance="0", job="app-server"} 700 + http_requests_total{group="canary", instance="1", job="app-server"} 800 + 
http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="app-server"} 600 -eval instant at 50m http_requests{group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="0", job="api-server"} 100 +eval instant at 50m http_requests_total{group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="0", job="api-server"} 100 -eval instant at 50m http_requests{job=~".+-server",group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="production", instance="0", job="api-server"} 100 +eval instant at 50m http_requests_total{job=~".+-server",group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + http_requests_total{group="production", instance="0", job="app-server"} 500 + http_requests_total{group="production", instance="1", job="api-server"} 200 + http_requests_total{group="production", instance="0", job="api-server"} 100 -eval instant at 50m http_requests{job!~"api-.+",group!="canary"} - http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="production", instance="0", job="app-server"} 500 +eval instant at 50m http_requests_total{job!~"api-.+",group!="canary"} + http_requests_total{group="production", instance="1", job="app-server"} 600 + 
http_requests_total{group="production", instance="0", job="app-server"} 500 -eval instant at 50m http_requests{group="production",job=~"api-.+"} - http_requests{group="production", instance="0", job="api-server"} 100 - http_requests{group="production", instance="1", job="api-server"} 200 +eval instant at 50m http_requests_total{group="production",job=~"api-.+"} + http_requests_total{group="production", instance="0", job="api-server"} 100 + http_requests_total{group="production", instance="1", job="api-server"} 200 -eval instant at 50m http_requests{group="production",job="api-server"} offset 5m - http_requests{group="production", instance="0", job="api-server"} 90 - http_requests{group="production", instance="1", job="api-server"} 180 +eval instant at 50m http_requests_total{group="production",job="api-server"} offset 5m + http_requests_total{group="production", instance="0", job="api-server"} 90 + http_requests_total{group="production", instance="1", job="api-server"} 180 clear diff --git a/promql/promqltest/testdata/subquery.test b/promql/promqltest/testdata/subquery.test index 3ac547a2b5..5a5e4e0092 100644 --- a/promql/promqltest/testdata/subquery.test +++ b/promql/promqltest/testdata/subquery.test @@ -1,41 +1,41 @@ load 10s - metric 1 2 + metric_total 1 2 # Evaluation before 0s gets no sample. -eval instant at 10s sum_over_time(metric[50s:10s]) +eval instant at 10s sum_over_time(metric_total[50s:10s]) {} 3 -eval instant at 10s sum_over_time(metric[50s:5s]) +eval instant at 10s sum_over_time(metric_total[50s:5s]) {} 4 # Every evaluation yields the last value, i.e. 2 -eval instant at 5m sum_over_time(metric[50s:10s]) +eval instant at 5m sum_over_time(metric_total[50s:10s]) {} 10 -# Series becomes stale at 5m10s (5m after last sample) +# Series becomes stale at 5m10s (5m after last sample). # Hence subquery gets a single sample at 5m10s. 
-eval instant at 5m59s sum_over_time(metric[60s:10s]) +eval instant at 5m59s sum_over_time(metric_total[60s:10s]) {} 2 -eval instant at 10s rate(metric[20s:10s]) +eval instant at 10s rate(metric_total[20s:10s]) {} 0.1 -eval instant at 20s rate(metric[20s:5s]) +eval instant at 20s rate(metric_total[20s:5s]) {} 0.06666666666666667 clear load 10s - http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 - http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 - http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 - http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_total{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 + http_requests_total{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 + http_requests_total{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 + http_requests_total{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m:10s]) +eval instant at 8000s rate(http_requests_total{group=~"pro.*"}[1m:10s]) {job="api-server", instance="0", group="production"} 1 {job="api-server", instance="1", group="production"} 2 -eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) +eval instant at 20000s avg_over_time(rate(http_requests_total[1m])[1m:1s]) {job="api-server", instance="0", group="canary"} 8 {job="api-server", instance="1", group="canary"} 4 {job="api-server", instance="1", group="production"} 3 @@ -44,64 +44,64 @@ eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s]) clear load 10s - metric1 0+1x1000 - metric2 0+2x1000 - metric3 0+3x1000 + metric1_total 0+1x1000 + metric2_total 0+2x1000 + metric3_total 0+3x1000 -eval instant at 1000s sum_over_time(metric1[30s:10s]) +eval instant at 1000s sum_over_time(metric1_total[30s:10s]) {} 297 # This is (97 + 98*2 + 99*2 + 100), 
because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. -eval instant at 1000s sum_over_time(metric1[30s:5s]) +eval instant at 1000s sum_over_time(metric1_total[30s:5s]) {} 591 # Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 10s) {} 297 # Same result for different offsets due to step alignment. -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 9s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 7s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 7s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 5s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 5s) {} 297 -eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s) +eval instant at 1010s sum_over_time(metric1_total[30s:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30s:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time(metric1[30:10] offset 3) +eval instant at 1010s sum_over_time(metric1_total[30:10] offset 3) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10s] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s) +eval instant at 1010s sum_over_time((metric1_total)[30:10] offset 3s) {} 297 -eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) +eval instant at 1010s sum_over_time((metric1_total)[30:10] offset 3) {} 297 -# Nested subqueries -eval instant at 1000s 
rate(sum_over_time(metric1[30s:10s])[50s:10s]) +# Nested subqueries. +eval instant at 1000s rate(sum_over_time(metric1_total[30s:10s])[50s:10s]) {} 0.30000000000000004 -eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) +eval instant at 1000s rate(sum_over_time(metric2_total[30s:10s])[50s:10s]) {} 0.6000000000000001 -eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) +eval instant at 1000s rate(sum_over_time(metric3_total[30s:10s])[50s:10s]) {} 0.9 -eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) +eval instant at 1000s rate(sum_over_time((metric1_total+metric2_total+metric3_total)[30s:10s])[30s:10s]) {} 1.8 clear @@ -109,28 +109,28 @@ clear # Fibonacci sequence, to ensure the rate is not constant. # Additional note: using subqueries unnecessarily is unwise. load 7s - metric 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 
218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 
57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 4378519841510949178490918731459856482 7084593923980518516849609894969925639 11463113765491467695340528626429782121 18547707689471986212190138521399707760 + metric_total 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 
298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 
4378519841510949178490918731459856482 7084593923980518516849609894969925639 11463113765491467695340528626429782121 18547707689471986212190138521399707760 # Extrapolated from [3@21, 144@77]: (144 - 3) / (77 - 21) -eval instant at 80s rate(metric[1m]) +eval instant at 80s rate(metric_total[1m]) {} 2.517857143 # Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) -eval instant at 80s rate(metric[1m500ms:10s]) +eval instant at 80s rate(metric_total[1m500ms:10s]) {} 2.3666666666666667 # Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 -eval instant at 80s rate(metric[1m1s:10s]) +eval instant at 80s rate(metric_total[1m1s:10s]) {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 -eval instant at 20s min_over_time(metric[10s]) +eval instant at 20s min_over_time(metric_total[10s]) {} 2 # min(2@20) -eval instant at 20s min_over_time(metric[15s:10s]) +eval instant at 20s min_over_time(metric_total[15s:10s]) {} 1 -eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) +eval instant at 20m min_over_time(rate(metric_total[5m])[20m:1m]) {} 0.12119047619047618 From 4b573e0521c226db63475a5d156c2aeedb973a3c Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 20 Nov 2024 18:18:39 +0100 Subject: [PATCH 0949/1397] promql: Fix subqueries to be really left-open Previously, we managed to get rid of the sample on the left bound later, so the problem didn't show up in the framework tests. But the subqueries were still evaluation with the sample on the left bound, taking space and showing up if returning the subquery result directly (without further processing through PromQL like in all the framework tests). 
Signed-off-by: beorn7 --- promql/engine.go | 4 +- promql/engine_test.go | 161 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 149 insertions(+), 16 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 56323748fe..8d85b092bb 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1524,7 +1524,7 @@ func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() val, ws := ev.eval(ctx, subq) - // But do incorporate the peak from the subquery + // But do incorporate the peak from the subquery. samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats mat := val.(Matrix) @@ -1989,7 +1989,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Start with the first timestamp after (ev.startTimestamp - offset - range) // that is aligned with the step (multiple of 'newEv.interval'). newEv.startTimestamp = newEv.interval * ((ev.startTimestamp - offsetMillis - rangeMillis) / newEv.interval) - if newEv.startTimestamp < (ev.startTimestamp - offsetMillis - rangeMillis) { + if newEv.startTimestamp <= (ev.startTimestamp - offsetMillis - rangeMillis) { newEv.startTimestamp += newEv.interval } diff --git a/promql/engine_test.go b/promql/engine_test.go index 7c398029f5..369def024d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1428,23 +1428,23 @@ load 10s }, { // The peak samples in memory is during the first evaluation: - // - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated. + // - Subquery takes 20 samples, 10 for each bigmetric. // - Result is calculated per series where the series samples is buffered, hence 10 more here. // - The result of two series is added before the last series buffer is discarded, so 2 more here. 
- // Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). + // Hence at peak it is 20 (subquery) + 10 (buffer of a series) + 2 (result from 2 series). // The subquery samples and the buffer is discarded before duplicating. Query: `rate(bigmetric[10s:1s] @ 10)`, - MaxSamples: 34, + MaxSamples: 32, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, }, { // Here the reasoning is same as above. But LHS and RHS are done one after another. - // So while one of them takes 34 samples at peak, we need to hold the 2 sample + // So while one of them takes 32 samples at peak, we need to hold the 2 sample // result of the other till then. Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 36, + MaxSamples: 34, Start: time.Unix(0, 0), End: time.Unix(10, 0), Interval: 5 * time.Second, @@ -1452,28 +1452,28 @@ load 10s { // promql.Sample as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. - // Hence at peak it is 2*21 (subquery from 0s to 20s) + // Hence at peak it is 2*20 (subquery from 0s to 20s) // + 10 (buffer of a series per evaluation) // + 6 (result from 2 series at 3 eval times). Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`, - MaxSamples: 58, + MaxSamples: 56, Start: time.Unix(10, 0), End: time.Unix(20, 0), Interval: 5 * time.Second, }, { // Nested subquery. - // We saw that innermost rate takes 34 samples which is still the peak + // We saw that innermost rate takes 32 samples which is still the peak // since the other two subqueries just duplicate the result. - Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, - MaxSamples: 34, + Query: `rate(rate(bigmetric[10:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`, + MaxSamples: 32, Start: time.Unix(10, 0), }, { // Nested subquery. - // Now the outermost subquery produces more samples than inner most rate. 
+ // Now the outermost subquery produces more samples than innermost rate. Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`, - MaxSamples: 36, + MaxSamples: 34, Start: time.Unix(10, 0), }, } @@ -1618,6 +1618,19 @@ load 1ms }, { query: "metric[100s:25s] @ 300", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, + Metric: lbls1, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, + Metric: lbls2, + }, + }, + }, { + query: "metric[100s1ms:25s] @ 300", // Add 1ms to the range to see the legacy behavior of the previous test. + start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, @@ -1631,6 +1644,15 @@ load 1ms }, { query: "metric_neg[50s:25s] @ 0", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 26, T: -25000}, {F: 1, T: 0}}, + Metric: lblsneg, + }, + }, + }, { + query: "metric_neg[50s1ms:25s] @ 0", // Add 1ms to the range to see the legacy behavior of the previous test. + start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, @@ -1640,6 +1662,15 @@ load 1ms }, { query: "metric_neg[50s:25s] @ -100", start: 100, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 126, T: -125000}, {F: 101, T: -100000}}, + Metric: lblsneg, + }, + }, + }, { + query: "metric_neg[50s1ms:25s] @ -100", // Add 1ms to the range to see the legacy behavior of the previous test. 
+ start: 100, result: promql.Matrix{ promql.Series{ Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, @@ -1647,7 +1678,7 @@ load 1ms }, }, }, { - query: `metric_ms[100ms:25ms] @ 2.345`, + query: `metric_ms[101ms:25ms] @ 2.345`, start: 100, result: promql.Matrix{ promql.Series{ @@ -1832,7 +1863,7 @@ func TestSubquerySelector(t *testing.T) { nil, promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1879,6 +1910,20 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, + }, + nil, + }, + Start: time.Unix(10020, 0), + }, + { // Normal selector. Add 1ms to the range to see the legacy behavior of the previous test. 
+ Query: `http_requests{group=~"pro.*",instance="0"}[30s1ms:10s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1921,6 +1966,36 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `rate(http_requests[1m])[15s:5s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, + }, + }, + nil, + }, + Start: time.Unix(8000, 0), + }, + { + Query: `rate(http_requests[1m])[15s1ms:5s]`, // Add 1ms to the range to see the legacy behavior of the previous test. 
Result: promql.Result{ nil, promql.Matrix{ @@ -1951,6 +2026,35 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(121, 0), // 1s later doesn't change the result. + }, + { + // Add 1ms to the range to see the legacy behavior of the previous test. + Query: `sum(http_requests{group=~"pro.*"})[30s1ms:10s]`, Result: promql.Result{ nil, promql.Matrix{ @@ -1965,6 +2069,20 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests)[40s:10s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests)[40s1ms:10s]`, // Add 1ms to the range to see the legacy behavior of the previous test. Result: promql.Result{ nil, promql.Matrix{ @@ -1979,6 +2097,21 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, + Result: promql.Result{ + nil, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + nil, + }, + Start: time.Unix(120, 0), + }, + { + // Add 1ms to the range to see the legacy behavior of the previous test. 
+ Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s1ms:5s]`, Result: promql.Result{ nil, promql.Matrix{ From c2e28f21baeb4537b56d49bf404d21292cdb33e1 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Thu, 21 Nov 2024 16:53:06 +0100 Subject: [PATCH 0950/1397] rules.NewGroup: Fix when no logger is passed (#15356) Signed-off-by: Arve Knudsen --- rules/group.go | 18 +++++++++++------- rules/group_test.go | 9 +++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/rules/group.go b/rules/group.go index 7dd046b57a..1f7953188b 100644 --- a/rules/group.go +++ b/rules/group.go @@ -99,9 +99,13 @@ type GroupOptions struct { // NewGroup makes a new Group with the given name, options, and rules. func NewGroup(o GroupOptions) *Group { - metrics := o.Opts.Metrics + opts := o.Opts + if opts == nil { + opts = &ManagerOptions{} + } + metrics := opts.Metrics if metrics == nil { - metrics = NewGroupMetrics(o.Opts.Registerer) + metrics = NewGroupMetrics(opts.Registerer) } key := GroupKey(o.File, o.Name) @@ -120,13 +124,13 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc = DefaultEvalIterationFunc } - concurrencyController := o.Opts.RuleConcurrencyController + concurrencyController := opts.RuleConcurrencyController if concurrencyController == nil { concurrencyController = sequentialRuleEvalController{} } - if o.Opts.Logger == nil { - promslog.NewNopLogger() + if opts.Logger == nil { + opts.Logger = promslog.NewNopLogger() } return &Group{ @@ -137,12 +141,12 @@ func NewGroup(o GroupOptions) *Group { limit: o.Limit, rules: o.Rules, shouldRestore: o.ShouldRestore, - opts: o.Opts, + opts: opts, seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), done: make(chan struct{}), managerDone: o.done, terminated: make(chan struct{}), - logger: o.Opts.Logger.With("file", o.File, "group", o.Name), + logger: opts.Logger.With("file", o.File, "group", o.Name), metrics: metrics, evalIterationFunc: evalIterationFunc, 
concurrencyController: concurrencyController, diff --git a/rules/group_test.go b/rules/group_test.go index ff1ef3d6c1..3911640a82 100644 --- a/rules/group_test.go +++ b/rules/group_test.go @@ -17,9 +17,18 @@ import ( "testing" "time" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" ) +func TestNewGroup(t *testing.T) { + g := NewGroup(GroupOptions{ + File: "test-file", + Name: "test-name", + }) + require.Equal(t, promslog.NewNopLogger().With("file", "test-file", "group", "test-name"), g.logger) +} + func TestGroup_Equals(t *testing.T) { tests := map[string]struct { first *Group From 3e24e84172483e0bca57cb0a1359ed939261c0aa Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Sat, 23 Nov 2024 14:20:37 -0500 Subject: [PATCH 0951/1397] fix!: stop unbounded memory usage from query log Resolves: #15433 When I converted prometheus to use slog in #14906, I update both the `QueryLogger` interface, as well as how the log calls to the `QueryLogger` were built up in `promql.Engine.exec()`. The backing logger for the `QueryLogger` in the engine is a `util/logging.JSONFileLogger`, and it's implementation of the `With()` method updates the logger the logger in place with the new keyvals added onto the underlying slog.Logger, which means they get inherited onto everything after. All subsequent calls to `With()`, even in later queries, would continue to then append on more and more keyvals for the various params and fields built up in the logger. In turn, this causes unbounded growth of the logger, leading to increased memory usage, and in at least one report was the likely cause of an OOM kill. More information can be found in the issue and the linked slack thread. 
This commit does a few things: - It was referenced in feedback in #14906 that it would've been better to not change the `QueryLogger` interface if possible, this PR proposes changes that bring it closer to alignment with the pre-3.0 `QueryLogger` interface contract - reverts `promql.Engine.exec()`'s usage of the query logger to the pattern of building up an array of args to pass at once to the end log call. Avoiding the repetitious calls to `.With()` are what resolve the issue with the logger growth/memory usage. - updates the scrape failure logger to use the update `QueryLogger` methods in the contract. - updates tests accordingly - cleans up unused methods Builds and passes tests successfully. Tested locally and confirmed I could no longer reproduce the issue/it resolved the issue. Signed-off-by: TJ Hoplock --- promql/engine.go | 17 +++++++---------- promql/engine_test.go | 30 +++++------------------------- scrape/scrape.go | 2 +- util/logging/file.go | 25 ++++--------------------- util/logging/file_test.go | 8 +++++--- 5 files changed, 22 insertions(+), 60 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 8d85b092bb..eecb4bcc4e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -126,10 +126,7 @@ type QueryEngine interface { // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Error(msg string, args ...any) - Info(msg string, args ...any) - Debug(msg string, args ...any) - Warn(msg string, args ...any) + Log(context.Context, slog.Level, string, ...any) With(args ...any) Close() error } @@ -637,20 +634,20 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. 
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - l.With("params", params) + f := []interface{}{"params", params} if err != nil { - l.With("error", err) + f = append(f, "error", err) } - l.With("stats", stats.NewQueryStats(q.Stats())) + f = append(f, "stats", stats.NewQueryStats(q.Stats())) if span := trace.SpanFromContext(ctx); span != nil { - l.With("spanID", span.SpanContext().SpanID()) + f = append(f, "spanID", span.SpanContext().SpanID()) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - l.With(k, v) + f = append(f, k, v) } } - l.Info("promql query logged") + l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...) // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? // ng.metrics.queryLogFailures.Inc() // ng.logger.Error("can't log query", "err", err) diff --git a/promql/engine_test.go b/promql/engine_test.go index 369def024d..59d8503e8e 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "sort" "strconv" @@ -2169,31 +2170,10 @@ func (f *FakeQueryLogger) Close() error { } // It implements the promql.QueryLogger interface. -func (f *FakeQueryLogger) Info(msg string, args ...any) { - log := append([]any{msg}, args...) - log = append(log, f.attrs...) - f.attrs = f.attrs[:0] - f.logs = append(f.logs, log...) -} - -// It implements the promql.QueryLogger interface. -func (f *FakeQueryLogger) Error(msg string, args ...any) { - log := append([]any{msg}, args...) - log = append(log, f.attrs...) - f.attrs = f.attrs[:0] - f.logs = append(f.logs, log...) -} - -// It implements the promql.QueryLogger interface. -func (f *FakeQueryLogger) Warn(msg string, args ...any) { - log := append([]any{msg}, args...) - log = append(log, f.attrs...) - f.attrs = f.attrs[:0] - f.logs = append(f.logs, log...) -} - -// It implements the promql.QueryLogger interface. 
-func (f *FakeQueryLogger) Debug(msg string, args ...any) { +func (f *FakeQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + // Test usage only really cares about existence of keyvals passed in + // via args, just append in the log message before handling the + // provided args and any embedded kvs added via `.With()` on f.attrs. log := append([]any{msg}, args...) log = append(log, f.attrs...) f.attrs = f.attrs[:0] diff --git a/scrape/scrape.go b/scrape/scrape.go index a668eb465b..eeab208d65 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1421,7 +1421,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Error("err", scrapeErr) + sl.scrapeFailureLogger.Log(context.Background(), slog.LevelError, scrapeErr.Error()) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { diff --git a/util/logging/file.go b/util/logging/file.go index f20927beda..9db7fb722b 100644 --- a/util/logging/file.go +++ b/util/logging/file.go @@ -14,6 +14,7 @@ package logging import ( + "context" "fmt" "log/slog" "os" @@ -57,26 +58,8 @@ func (l *JSONFileLogger) With(args ...any) { l.logger = l.logger.With(args...) } -// Info calls the `Info()` method on the underlying `log/slog.Logger` with the +// Log calls the `Log()` method on the underlying `log/slog.Logger` with the // provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) Info(msg string, args ...any) { - l.logger.Info(msg, args...) -} - -// Error calls the `Error()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) Error(msg string, args ...any) { - l.logger.Error(msg, args...) 
-} - -// Debug calls the `Debug()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) Debug(msg string, args ...any) { - l.logger.Debug(msg, args...) -} - -// Warn calls the `Warn()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) Warn(msg string, args ...any) { - l.logger.Warn(msg, args...) +func (l *JSONFileLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + l.logger.Log(ctx, level, msg, args...) } diff --git a/util/logging/file_test.go b/util/logging/file_test.go index 00752df8db..c9f7240fee 100644 --- a/util/logging/file_test.go +++ b/util/logging/file_test.go @@ -14,6 +14,8 @@ package logging import ( + "context" + "log/slog" "os" "strings" "testing" @@ -34,7 +36,7 @@ func TestJSONFileLogger_basic(t *testing.T) { require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - l.Info("test", "hello", "world") + l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") require.NoError(t, err) r := make([]byte, 1024) _, err = f.Read(r) @@ -64,14 +66,14 @@ func TestJSONFileLogger_parallel(t *testing.T) { require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - l.Info("test", "hello", "world") + l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") require.NoError(t, err) l2, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) require.NotNil(t, l, "logger can't be nil") - l2.Info("test", "hello", "world") + l2.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") require.NoError(t, err) err = l.Close() From abfc3145a2e4d1819b10d8356105ddcf874a6ca6 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Sun, 24 Nov 2024 21:38:59 +0530 Subject: [PATCH 0952/1397] apply DRY on clamp (#15441) Signed-off-by: Neeraj Gartia --- 
promql/functions.go | 46 +++++++++++---------------------------------- 1 file changed, 11 insertions(+), 35 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 009a370ebf..4be743040d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -465,11 +465,7 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval return vals[0].(Vector), nil } -// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - minVal := vals[1].(Vector)[0].F - maxVal := vals[2].(Vector)[0].F +func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if maxVal < minVal { return enh.Out, nil } @@ -490,46 +486,26 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper return enh.Out, nil } +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + vec := vals[0].(Vector) + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + return clamp(vec, minVal, maxVal, enh) +} + // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) maxVal := vals[1].(Vector)[0].F - for _, el := range vec { - if el.H != nil { - // Process only float samples. 
- continue - } - if !enh.enableDelayedNameRemoval { - el.Metric = el.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric, - F: math.Min(maxVal, el.F), - DropName: true, - }) - } - return enh.Out, nil + return clamp(vec, math.Inf(-1), maxVal, enh) } // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) minVal := vals[1].(Vector)[0].F - for _, el := range vec { - if el.H != nil { - // Process only float samples. - continue - } - if !enh.enableDelayedNameRemoval { - el.Metric = el.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric, - F: math.Max(minVal, el.F), - DropName: true, - }) - } - return enh.Out, nil + return clamp(vec, minVal, math.Inf(+1), enh) } // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === From 63d87a60a8bdc2a9974b3a270f75e36a8cdf850b Mon Sep 17 00:00:00 2001 From: Simon Pasquier Date: Sun, 24 Nov 2024 17:13:19 +0100 Subject: [PATCH 0953/1397] docs: fix typo (#15436) Signed-off-by: Simon Pasquier --- docs/configuration/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 6fa5ac0d9b..3ec15f9b10 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -135,7 +135,7 @@ global: [ keep_dropped_targets: | default = 0 ] # Specifies the validation scheme for metric and label names. Either blank or - # "utf8" for for full UTF-8 support, or "legacy" for letters, numbers, colons, + # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, # and underscores. 
[ metric_name_validation_scheme | default "utf8" ] From e664c16b31c86702ac700e04e46ca0890410a6b8 Mon Sep 17 00:00:00 2001 From: Raphael Philipe Mendes da Silva Date: Sun, 24 Nov 2024 08:28:21 -0800 Subject: [PATCH 0954/1397] api: fix typo in list rules API response (#15400) * Fix typo in list rules API response --------- Signed-off-by: Raphael Silva --- web/api/v1/api.go | 2 +- web/api/v1/api_test.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 85d48f28ff..38a6e67816 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1374,7 +1374,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { // RuleDiscovery has info for all rules. type RuleDiscovery struct { RuleGroups []*RuleGroup `json:"groups"` - GroupNextToken string `json:"groupNextToken:omitempty"` + GroupNextToken string `json:"groupNextToken,omitempty"` } // RuleGroup has info for rules which are part of a group. diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index ad2434ee69..14fcfea041 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -2899,6 +2899,14 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E errType: errorBadData, zeroFunc: rulesZeroFunc, }, + { // groupNextToken should not be in empty response + endpoint: api.rules, + query: url.Values{ + "match[]": []string{`{testlabel="abc-cannot-find"}`}, + "group_limit": []string{"1"}, + }, + responseAsJSON: `{"groups":[]}`, + }, { endpoint: api.queryExemplars, query: url.Values{ From 40c3ba9fd411ac82b46a0cb27869c5ae2de4ba5a Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sun, 24 Nov 2024 08:30:20 -0800 Subject: [PATCH 0955/1397] fix promtool analyze block shows metric name with 0 cardinality (#15438) Signed-off-by: Ben Ye --- cmd/promtool/tsdb.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index e1c6cf4298..cefacfec28 100644 --- 
a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -589,7 +589,10 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten if err != nil { return err } - postings = index.Intersect(postings, index.NewListPostings(refs)) + // Only intersect postings if matchers are specified. + if len(matchers) > 0 { + postings = index.Intersect(postings, index.NewListPostings(refs)) + } count := 0 for postings.Next() { count++ From fc5b7dc5d6ed0a2d3b6950d261e7883dd13c21f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 24 Nov 2024 17:39:51 +0100 Subject: [PATCH 0956/1397] chore(deps): bump the k8s-io group with 3 updates (#15283) Bumps the k8s-io group with 3 updates: [k8s.io/api](https://github.com/kubernetes/api), [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) and [k8s.io/client-go](https://github.com/kubernetes/client-go). Updates `k8s.io/api` from 0.31.1 to 0.31.2 - [Commits](https://github.com/kubernetes/api/compare/v0.31.1...v0.31.2) Updates `k8s.io/apimachinery` from 0.31.1 to 0.31.2 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.31.1...v0.31.2) Updates `k8s.io/client-go` from 0.31.1 to 0.31.2 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.31.1...v0.31.2) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 1f3413372a..5f45dd2327 100644 --- a/go.mod +++ b/go.mod @@ -85,9 +85,9 @@ require ( google.golang.org/protobuf v1.35.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.1 - k8s.io/apimachinery v0.31.1 - k8s.io/client-go v0.31.1 + k8s.io/api v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.130.1 ) diff --git a/go.sum b/go.sum index 334f59b733..7b4c18f782 100644 --- a/go.sum +++ b/go.sum @@ -733,12 +733,12 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= -k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod 
h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= From e243ed66b9638fe78d334b7e3fb9fc41863fb3cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 24 Nov 2024 18:40:58 +0100 Subject: [PATCH 0957/1397] chore(deps): bump github.com/fsnotify/fsnotify from 1.7.0 to 1.8.0 (#15300) Bumps [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) from 1.7.0 to 1.8.0. - [Release notes](https://github.com/fsnotify/fsnotify/releases) - [Changelog](https://github.com/fsnotify/fsnotify/blob/main/CHANGELOG.md) - [Commits](https://github.com/fsnotify/fsnotify/compare/v1.7.0...v1.8.0) --- updated-dependencies: - dependency-name: github.com/fsnotify/fsnotify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5f45dd2327..1a8f4c36f3 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/envoyproxy/go-control-plane v0.13.1 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/fsnotify/fsnotify v1.7.0 + github.com/fsnotify/fsnotify v1.8.0 github.com/go-openapi/strfmt v0.23.0 github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 diff --git a/go.sum b/go.sum index 7b4c18f782..80e4532aac 100644 --- a/go.sum +++ b/go.sum @@ -126,8 +126,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= From 872e2db2a99b454498ec0278108978dcf66c28d2 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sun, 24 Nov 2024 10:46:24 -0800 Subject: [PATCH 0958/1397] Implement json encoder/decoder for regexp (#15383) * implement json encoder/decoder for regexp --------- Signed-off-by: Ben Ye 
--- model/relabel/relabel.go | 34 +++++++++++++++++++++++++++------- model/relabel/relabel_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 7 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 93331cf99f..2bec6cfabb 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -16,6 +16,7 @@ package relabel import ( "crypto/md5" "encoding/binary" + "encoding/json" "errors" "fmt" "strconv" @@ -84,20 +85,20 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { type Config struct { // A list of labels from which values are taken and concatenated // with the configured separator in order. - SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` + SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"sourceLabels,omitempty"` // Separator is the string between concatenated values from the source labels. - Separator string `yaml:"separator,omitempty"` + Separator string `yaml:"separator,omitempty" json:"separator,omitempty"` // Regex against which the concatenation is matched. - Regex Regexp `yaml:"regex,omitempty"` + Regex Regexp `yaml:"regex,omitempty" json:"regex,omitempty"` // Modulus to take of the hash of concatenated values from the source labels. - Modulus uint64 `yaml:"modulus,omitempty"` + Modulus uint64 `yaml:"modulus,omitempty" json:"modulus,omitempty"` // TargetLabel is the label to which the resulting string is written in a replacement. // Regexp interpolation is allowed for the replace action. - TargetLabel string `yaml:"target_label,omitempty"` + TargetLabel string `yaml:"target_label,omitempty" json:"targetLabel,omitempty"` // Replacement is the regex replacement pattern to be used. - Replacement string `yaml:"replacement,omitempty"` + Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"` // Action is the action to be performed for the relabeling. 
- Action Action `yaml:"action,omitempty"` + Action Action `yaml:"action,omitempty" json:"action,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -207,6 +208,25 @@ func (re Regexp) MarshalYAML() (interface{}, error) { return nil, nil } +// UnmarshalJSON implements the json.Unmarshaler interface. +func (re *Regexp) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + r, err := NewRegexp(s) + if err != nil { + return err + } + *re = r + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (re Regexp) MarshalJSON() ([]byte, error) { + return json.Marshal(re.String()) +} + // IsZero implements the yaml.IsZeroer interface. func (re Regexp) IsZero() bool { return re.Regexp == DefaultRelabelConfig.Regex.Regexp diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 0c6d41f5e3..6f234675c6 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -14,6 +14,7 @@ package relabel import ( + "encoding/json" "strconv" "testing" @@ -964,3 +965,35 @@ func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) { require.NoError(t, err) require.Nil(t, unmarshalled.Regexp) } + +func TestRegexp_JSONUnmarshalThenMarshal(t *testing.T) { + tests := []struct { + name string + input string + }{ + { + name: "Empty regex", + input: `{"regex":""}`, + }, + { + name: "string literal", + input: `{"regex":"foo"}`, + }, + { + name: "regex", + input: `{"regex":".*foo.*"}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var unmarshalled Config + err := json.Unmarshal([]byte(test.input), &unmarshalled) + require.NoError(t, err) + + marshalled, err := json.Marshal(&unmarshalled) + require.NoError(t, err) + + require.Equal(t, test.input, string(marshalled)) + }) + } +} From bc008f1b0ec2a2f0db682f6c7baec09b02773150 Mon Sep 17 00:00:00 2001 From: Kateryna Pavlova Date: Sun, 24 Nov 2024 12:37:28 +0100 
Subject: [PATCH 0959/1397] docs: fix range queries link Signed-off-by: Kateryna Pavlova --- docs/querying/basics.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 1c06afb85d..4f7624211d 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -37,7 +37,7 @@ Depending on the use-case (e.g. when graphing vs. displaying the output of an expression), only some of these types are legal as the result of a user-specified expression. For [instant queries](api.md#instant-queries), any of the above data types are allowed as the root of the expression. -[Range queries](api.md/#range-queries) only support scalar-typed and instant-vector-typed expressions. +[Range queries](api.md#range-queries) only support scalar-typed and instant-vector-typed expressions. _Notes about the experimental native histograms:_ @@ -152,7 +152,7 @@ The value returned will be that of the most recent sample at or before the query's evaluation timestamp (in the case of an [instant query](api.md#instant-queries)) or the current step within the query (in the case of a -[range query](api.md/#range-queries)). +[range query](api.md#range-queries)). The [`@` modifier](#modifier) allows overriding the timestamp relative to which the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago. 
From d6e43b89f45c0540d6cd4743ffeaa0feb69e65ad Mon Sep 17 00:00:00 2001 From: Kateryna Pavlova Date: Sun, 24 Nov 2024 22:03:04 +0100 Subject: [PATCH 0960/1397] docs: add regex section in querying basics Signed-off-by: Kateryna Pavlova --- docs/querying/basics.md | 11 +++++++---- docs/querying/examples.md | 5 +---- docs/querying/functions.md | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 4f7624211d..c1c76e7e04 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -178,7 +178,7 @@ against regular expressions. The following label matching operators exist: * `=~`: Select labels that regex-match the provided string. * `!~`: Select labels that do not regex-match the provided string. -Regex matches are fully anchored. A match of `env=~"foo"` is treated as `env=~"^foo$"`. +[Regex](#regular-expressions) matches are fully anchored. A match of `env=~"foo"` is treated as `env=~"^foo$"`. For example, this selects all `http_requests_total` time series for `staging`, `testing`, and `development` environments and HTTP methods other than `GET`. @@ -241,9 +241,6 @@ A workaround for this restriction is to use the `__name__` label: {__name__="on"} # Good! -All regular expressions in Prometheus use [RE2 -syntax](https://github.com/google/re2/wiki/Syntax). - ### Range Vector Selectors Range vector literals work like instant vector literals, except that they @@ -365,6 +362,12 @@ PromQL supports line comments that start with `#`. Example: # This is a comment +## Regular expressions + +All regular expressions in Prometheus use [RE2 syntax](https://github.com/google/re2/wiki/Syntax). + +Regex matches are always fully anchored. 
+ ## Gotchas ### Staleness diff --git a/docs/querying/examples.md b/docs/querying/examples.md index 8287ff6f62..1879a9945c 100644 --- a/docs/querying/examples.md +++ b/docs/querying/examples.md @@ -25,14 +25,11 @@ for the same vector, making it a [range vector](../basics/#range-vector-selector Note that an expression resulting in a range vector cannot be graphed directly, but viewed in the tabular ("Console") view of the expression browser. -Using regular expressions, you could select time series only for jobs whose +Using [regular expressions](./basics.md#regular-expressions), you could select time series only for jobs whose name match a certain pattern, in this case, all jobs that end with `server`: http_requests_total{job=~".*server"} -All regular expressions in Prometheus use [RE2 -syntax](https://github.com/google/re2/wiki/Syntax). - To select all HTTP status codes except 4xx ones, you could run: http_requests_total{status!~"4.."} diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 310b7b9337..facdfce154 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -598,7 +598,7 @@ label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1", ## `label_replace()` For each timeseries in `v`, `label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)` -matches the [regular expression](https://github.com/google/re2/wiki/Syntax) `regex` against the value of the label `src_label`. If it +matches the [regular expression](./basics.md#regular-expressions) `regex` against the value of the label `src_label`. If it matches, the value of the label `dst_label` in the returned timeseries will be the expansion of `replacement`, together with the original labels in the input. Capturing groups in the regular expression can be referenced with `$1`, `$2`, etc. Named capturing groups in the regular expression can be referenced with `$name` (where `name` is the capturing group name). 
If the regular expression doesn't match then the timeseries is returned unchanged. From a53b48a912200a9e5628eb889b76deb502031345 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 25 Nov 2024 09:27:50 +0100 Subject: [PATCH 0961/1397] OTLP translate: keep identifying attributes in target_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- .../otlptranslator/prometheusremotewrite/helper.go | 13 +++++-------- .../prometheusremotewrite/helper_test.go | 2 +- .../prometheusremotewrite/histograms.go | 1 - .../prometheusremotewrite/number_data_points.go | 2 -- 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 30cfa86436..89e00f3759 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -22,7 +22,6 @@ import ( "fmt" "log" "math" - "slices" "sort" "strconv" "unicode/utf8" @@ -117,7 +116,7 @@ var seps = []byte{'\xff'} // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. 
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings, - ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { + logOnOverwrite bool, extras ...string) []prompb.Label { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) @@ -147,9 +146,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting // XXX: Should we always drop service namespace/service name/service instance ID from the labels // (as they get mapped to other Prometheus labels)? attributes.Range(func(key string, value pcommon.Value) bool { - if !slices.Contains(ignoreAttrs, key) { - labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) - } + labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) return true }) sort.Stable(ByLabelName(labels)) @@ -251,7 +248,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) + baseLabels := createAttributes(resource, pt.Attributes(), settings, false) // If the sum is unset, it indicates the _sum metric point should be // omitted @@ -451,7 +448,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) + baseLabels := createAttributes(resource, pt.Attributes(), settings, false) // treat sum as a sample in an individual TimeSeries sum := &prompb.Sample{ @@ -600,7 +597,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta } settings.PromoteResourceAttributes = nil - labels := createAttributes(resource, 
attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name) + labels := createAttributes(resource, attributes, settings, false, model.MetricNameLabel, name) haveIdentifier := false for _, l := range labels { if l.Name == model.JobLabel || l.Name == model.InstanceLabel { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index b22282097d..b3b93dcc06 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -161,7 +161,7 @@ func TestCreateAttributes(t *testing.T) { settings := Settings{ PromoteResourceAttributes: tc.promoteResourceAttributes, } - lbls := createAttributes(resource, attrs, settings, nil, false, model.MetricNameLabel, "test_metric") + lbls := createAttributes(resource, attrs, settings, false, model.MetricNameLabel, "test_metric") assert.ElementsMatch(t, lbls, tc.expectedLabels) }) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 8349d4f907..dfe0c9f90e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -54,7 +54,6 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont resource, pt.Attributes(), settings, - nil, true, model.MetricNameLabel, promName, diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 6cdab450e1..035c0676c4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -40,7 +40,6 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data resource, 
pt.Attributes(), settings, - nil, true, model.MetricNameLabel, name, @@ -76,7 +75,6 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo resource, pt.Attributes(), settings, - nil, true, model.MetricNameLabel, name, From 2a1b940ae43cb69c4f399dceda4c7b59b2593123 Mon Sep 17 00:00:00 2001 From: hongmengning Date: Mon, 25 Nov 2024 17:33:04 +0800 Subject: [PATCH 0962/1397] discovery: fix some function names in comment Signed-off-by: hongmengning --- discovery/consul/consul.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index cb3dfe1373..33b82d23a4 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -241,7 +241,7 @@ func (d *Discovery) shouldWatch(name string, tags []string) bool { return d.shouldWatchFromName(name) && d.shouldWatchFromTags(tags) } -// shouldWatch returns whether the service of the given name should be watched based on its name. +// shouldWatchFromName returns whether the service of the given name should be watched based on its name. func (d *Discovery) shouldWatchFromName(name string) bool { // If there's no fixed set of watched services, we watch everything. if len(d.watchedServices) == 0 { @@ -256,7 +256,7 @@ func (d *Discovery) shouldWatchFromName(name string) bool { return false } -// shouldWatch returns whether the service of the given name should be watched based on its tags. +// shouldWatchFromTags returns whether the service of the given name should be watched based on its tags. // This gets called when the user doesn't specify a list of services in order to avoid watching // *all* services. 
Details in https://github.com/prometheus/prometheus/pull/3814 func (d *Discovery) shouldWatchFromTags(tags []string) bool { From 41f051e9a494bfe88d18bcd5a5fb2a6b97320d0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 25 Nov 2024 14:46:22 +0100 Subject: [PATCH 0963/1397] Small improvement in handling cases without count MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- util/convertnhcb/convertnhcb.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/util/convertnhcb/convertnhcb.go b/util/convertnhcb/convertnhcb.go index ee5bcb72de..ce8fdda899 100644 --- a/util/convertnhcb/convertnhcb.go +++ b/util/convertnhcb/convertnhcb.go @@ -139,19 +139,15 @@ func (h TempHistogram) Convert() (*histogram.Histogram, *histogram.FloatHistogra return nil, nil, h.err } - if len(h.buckets) == 0 || h.buckets[len(h.buckets)-1].le != math.Inf(1) { - // No +Inf bucket. - if !h.hasCount && len(h.buckets) > 0 { - // No count either, so set count to the last known bucket's count. - h.count = h.buckets[len(h.buckets)-1].count - } - // Let the last bucket be +Inf with the overall count. - h.buckets = append(h.buckets, tempHistogramBucket{le: math.Inf(1), count: h.count}) + if !h.hasCount && len(h.buckets) > 0 { + // No count, so set count to the highest known bucket's count. + h.count = h.buckets[len(h.buckets)-1].count } - if !h.hasCount { - h.count = h.buckets[len(h.buckets)-1].count - h.hasCount = true + if len(h.buckets) == 0 || h.buckets[len(h.buckets)-1].le != math.Inf(1) { + // No +Inf bucket. + // Let the last bucket be +Inf with the overall count. 
+ h.buckets = append(h.buckets, tempHistogramBucket{le: math.Inf(1), count: h.count}) } for _, b := range h.buckets { From a48d05912d80e03532d1883a492e7e8de21a1786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 25 Nov 2024 15:01:26 +0100 Subject: [PATCH 0964/1397] nhcb: optimize, do not recalculate suffixes multiple times MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce string manipulation by just cutting off the histogram suffixes from the series name label once. Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse.go | 34 +++++++++++++++++++-------------- promql/promqltest/test.go | 22 ++++++++++++--------- util/convertnhcb/convertnhcb.go | 24 +++++++++++++++-------- 3 files changed, 49 insertions(+), 31 deletions(-) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index 6fe2e8e54e..ff756965f4 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -243,7 +243,8 @@ func (p *NHCBParser) compareLabels() bool { // Different metric type. return true } - if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) { + _, name := convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) + if p.lastHistogramName != name { // Different metric name. return true } @@ -253,8 +254,8 @@ func (p *NHCBParser) compareLabels() bool { } // Save the label set of the classic histogram without suffix and bucket `le` label. 
-func (p *NHCBParser) storeClassicLabels() { - p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) +func (p *NHCBParser) storeClassicLabels(name string) { + p.lastHistogramName = name p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) p.lastHistogramExponential = false } @@ -275,25 +276,30 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { } mName := lset.Get(labels.MetricName) // Sanity check to ensure that the TYPE metadata entry name is the same as the base name. - if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) { + suffixType, name := convertnhcb.GetHistogramMetricBaseName(mName) + if name != string(p.bName) { return false } - switch { - case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel): + switch suffixType { + case convertnhcb.SuffixBucket: + if !lset.Has(labels.BucketLabel) { + // This should not really happen. + return false + } le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) if err == nil && !math.IsNaN(le) { - p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) { + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { _ = hist.SetBucketCount(le, p.value) }) return true } - case strings.HasSuffix(mName, "_count"): - p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) { + case convertnhcb.SuffixCount: + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { _ = hist.SetCount(p.value) }) return true - case strings.HasSuffix(mName, "_sum"): - p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) { + case convertnhcb.SuffixSum: + p.processClassicHistogramSeries(lset, name, func(hist *convertnhcb.TempHistogram) { _ = hist.SetSum(p.value) }) return true @@ -301,12 +307,12 @@ func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) 
bool { return false } -func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { +func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, name string, updateHist func(*convertnhcb.TempHistogram)) { if p.state != stateCollecting { - p.storeClassicLabels() + p.storeClassicLabels(name) p.tempCT = p.parser.CreatedTimestamp() p.state = stateCollecting - p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, name) } p.storeExemplars() updateHist(&p.tempNHCB) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 36519561ae..a4dbd64ff8 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -491,8 +491,8 @@ func newTempHistogramWrapper() tempHistogramWrapper { } } -func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogram func(*convertnhcb.TempHistogram, float64)) { - m2 := convertnhcb.GetHistogramMetricBase(m, suffix) +func processClassicHistogramSeries(m labels.Labels, name string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogram func(*convertnhcb.TempHistogram, float64)) { + m2 := convertnhcb.GetHistogramMetricBase(m, name) m2hash := m2.Hash() histogramWrapper, exists := histogramMap[m2hash] if !exists { @@ -523,21 +523,25 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { for hash, smpls := range cmd.defs { m := cmd.metrics[hash] mName := m.Get(labels.MetricName) - switch { - case strings.HasSuffix(mName, "_bucket") && m.Has(labels.BucketLabel): + suffixType, name := convertnhcb.GetHistogramMetricBaseName(mName) + switch suffixType { + case convertnhcb.SuffixBucket: + if !m.Has(labels.BucketLabel) { + panic(fmt.Sprintf("expected bucket label in metric %s", m)) + } le, err := strconv.ParseFloat(m.Get(labels.BucketLabel), 64) 
if err != nil || math.IsNaN(le) { continue } - processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + processClassicHistogramSeries(m, name, histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { _ = histogram.SetBucketCount(le, f) }) - case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + case convertnhcb.SuffixCount: + processClassicHistogramSeries(m, name, histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { _ = histogram.SetCount(f) }) - case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { + case convertnhcb.SuffixSum: + processClassicHistogramSeries(m, name, histogramMap, smpls, func(histogram *convertnhcb.TempHistogram, f float64) { _ = histogram.SetSum(f) }) } diff --git a/util/convertnhcb/convertnhcb.go b/util/convertnhcb/convertnhcb.go index ce8fdda899..b3dc851daa 100644 --- a/util/convertnhcb/convertnhcb.go +++ b/util/convertnhcb/convertnhcb.go @@ -228,26 +228,34 @@ func (h TempHistogram) convertToFloatHistogram() (*histogram.Histogram, *histogr return nil, rh.Compact(0), nil } -func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels { - mName := m.Get(labels.MetricName) +func GetHistogramMetricBase(m labels.Labels, name string) labels.Labels { return labels.NewBuilder(m). - Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). + Set(labels.MetricName, name). Del(labels.BucketLabel). Labels() } +type SuffixType int + +const ( + SuffixNone SuffixType = iota + SuffixBucket + SuffixSum + SuffixCount +) + // GetHistogramMetricBaseName removes the suffixes _bucket, _sum, _count from // the metric name. We specifically do not remove the _created suffix as that // should be removed by the caller. 
-func GetHistogramMetricBaseName(s string) string { +func GetHistogramMetricBaseName(s string) (SuffixType, string) { if r, ok := strings.CutSuffix(s, "_bucket"); ok { - return r + return SuffixBucket, r } if r, ok := strings.CutSuffix(s, "_sum"); ok { - return r + return SuffixSum, r } if r, ok := strings.CutSuffix(s, "_count"); ok { - return r + return SuffixCount, r } - return s + return SuffixNone, s } From 88675710f97c126fbda3bda878465756dcd439f2 Mon Sep 17 00:00:00 2001 From: newtonne <14221622+newtonne@users.noreply.github.com> Date: Thu, 19 Sep 2024 19:34:28 +0100 Subject: [PATCH 0965/1397] Add support for utf8 names on `/v1/label/:name/values` endpoint Previously, the api was evaluating this regex to determine if the label name was valid or not: https://github.com/prometheus/common/blob/14bac55a992f7b83ab9d147a041e274606bdb607/model/labels.go#L94 However, I believe that the `IsValid()` function is what ought to be used in the brave new utf8 era. **Before** ``` $ curl localhost:9090/api/v1/label/host.name/values {"status":"error","errorType":"bad_data","error":"invalid label name: \"host.name\""} ``` **After** ``` $ curl localhost:9090/api/v1/label/host.name/values {"status":"success","data":["localhost"]} ``` It's very likely that I'm missing something here or you were already planning to do this at some point but I just encountered this issue and figured I'd give it a go. 
Signed-off-by: Owen Williams --- promql/parser/generated_parser.y | 6 + promql/parser/generated_parser.y.go | 747 ++++++++++++++-------------- web/api/v1/api.go | 3 +- web/api/v1/api_test.go | 45 +- 4 files changed, 423 insertions(+), 378 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index befb9bdf3e..c321a1e973 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -667,10 +667,16 @@ label_set_list : label_set_list COMMA label_set_item label_set_item : IDENTIFIER EQL STRING { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } } + | string_identifier EQL STRING + { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } } | IDENTIFIER EQL error { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}} + | string_identifier EQL error + { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}} | IDENTIFIER error { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}} + | string_identifier error + { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}} | error { yylex.(*parser).unexpected("label set", "identifier or \"}\""); $$ = labels.Label{} } ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index ad58a52976..8979410ceb 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -251,293 +251,295 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 37, - 1, 138, - 10, 138, - 24, 138, + 1, 141, + 10, 141, + 24, 141, -2, 0, -1, 61, - 2, 181, - 15, 181, - 79, 181, - 85, 181, - -2, 102, - -1, 62, - 2, 182, - 15, 182, - 79, 182, - 85, 182, - -2, 103, - -1, 63, - 2, 183, - 15, 183, - 79, 183, - 85, 183, - -2, 105, - -1, 64, 2, 184, 15, 184, 79, 184, 85, 184, - -2, 106, - -1, 65, + -2, 102, + -1, 62, 2, 185, 15, 185, 79, 185, 85, 185, - -2, 107, - -1, 66, + -2, 103, + -1, 63, 2, 186, 15, 186, 79, 186, 85, 186, - 
-2, 112, - -1, 67, + -2, 105, + -1, 64, 2, 187, 15, 187, 79, 187, 85, 187, - -2, 114, - -1, 68, + -2, 106, + -1, 65, 2, 188, 15, 188, 79, 188, 85, 188, - -2, 116, - -1, 69, + -2, 107, + -1, 66, 2, 189, 15, 189, 79, 189, 85, 189, - -2, 117, - -1, 70, + -2, 112, + -1, 67, 2, 190, 15, 190, 79, 190, 85, 190, - -2, 118, - -1, 71, + -2, 114, + -1, 68, 2, 191, 15, 191, 79, 191, 85, 191, - -2, 119, - -1, 72, + -2, 116, + -1, 69, 2, 192, 15, 192, 79, 192, 85, 192, - -2, 120, - -1, 73, + -2, 117, + -1, 70, 2, 193, 15, 193, 79, 193, 85, 193, - -2, 124, - -1, 74, + -2, 118, + -1, 71, 2, 194, 15, 194, 79, 194, 85, 194, + -2, 119, + -1, 72, + 2, 195, + 15, 195, + 79, 195, + 85, 195, + -2, 120, + -1, 73, + 2, 196, + 15, 196, + 79, 196, + 85, 196, + -2, 124, + -1, 74, + 2, 197, + 15, 197, + 79, 197, + 85, 197, -2, 125, - -1, 200, - 9, 243, - 12, 243, - 13, 243, - 18, 243, - 19, 243, - 25, 243, - 41, 243, - 47, 243, - 48, 243, - 51, 243, - 57, 243, - 62, 243, - 63, 243, - 64, 243, - 65, 243, - 66, 243, - 67, 243, - 68, 243, - 69, 243, - 70, 243, - 71, 243, - 72, 243, - 73, 243, - 74, 243, - 75, 243, - 79, 243, - 83, 243, - 85, 243, - 88, 243, - 89, 243, + -1, 205, + 9, 246, + 12, 246, + 13, 246, + 18, 246, + 19, 246, + 25, 246, + 41, 246, + 47, 246, + 48, 246, + 51, 246, + 57, 246, + 62, 246, + 63, 246, + 64, 246, + 65, 246, + 66, 246, + 67, 246, + 68, 246, + 69, 246, + 70, 246, + 71, 246, + 72, 246, + 73, 246, + 74, 246, + 75, 246, + 79, 246, + 83, 246, + 85, 246, + 88, 246, + 89, 246, -2, 0, - -1, 201, - 9, 243, - 12, 243, - 13, 243, - 18, 243, - 19, 243, - 25, 243, - 41, 243, - 47, 243, - 48, 243, - 51, 243, - 57, 243, - 62, 243, - 63, 243, - 64, 243, - 65, 243, - 66, 243, - 67, 243, - 68, 243, - 69, 243, - 70, 243, - 71, 243, - 72, 243, - 73, 243, - 74, 243, - 75, 243, - 79, 243, - 83, 243, - 85, 243, - 88, 243, - 89, 243, + -1, 206, + 9, 246, + 12, 246, + 13, 246, + 18, 246, + 19, 246, + 25, 246, + 41, 246, + 47, 246, + 48, 246, + 51, 246, + 57, 246, + 62, 246, + 63, 246, + 
64, 246, + 65, 246, + 66, 246, + 67, 246, + 68, 246, + 69, 246, + 70, 246, + 71, 246, + 72, 246, + 73, 246, + 74, 246, + 75, 246, + 79, 246, + 83, 246, + 85, 246, + 88, 246, + 89, 246, -2, 0, } const yyPrivate = 57344 -const yyLast = 799 +const yyLast = 804 var yyAct = [...]int16{ - 152, 334, 332, 155, 339, 226, 39, 192, 276, 44, - 291, 290, 118, 82, 178, 229, 107, 106, 346, 347, - 348, 349, 109, 108, 198, 239, 199, 156, 110, 105, - 6, 245, 200, 201, 133, 325, 111, 329, 228, 60, - 357, 293, 328, 304, 267, 160, 266, 128, 55, 151, - 302, 311, 302, 196, 340, 159, 55, 89, 54, 356, - 241, 242, 355, 113, 243, 114, 54, 98, 99, 265, - 112, 101, 256, 104, 88, 230, 232, 234, 235, 236, - 244, 246, 249, 250, 251, 252, 253, 257, 258, 105, - 333, 231, 233, 237, 238, 240, 247, 248, 103, 115, - 109, 254, 255, 324, 150, 218, 110, 264, 111, 270, - 77, 35, 7, 149, 188, 163, 322, 321, 173, 320, - 167, 170, 323, 165, 271, 166, 2, 3, 4, 5, - 263, 101, 194, 104, 180, 184, 197, 187, 186, 319, - 272, 202, 203, 204, 205, 206, 207, 208, 209, 210, - 211, 212, 213, 214, 215, 216, 195, 299, 103, 318, - 217, 36, 298, 1, 190, 219, 220, 317, 160, 160, - 316, 193, 160, 154, 182, 196, 229, 297, 159, 159, - 160, 358, 159, 268, 181, 183, 239, 260, 296, 262, - 159, 315, 245, 129, 314, 55, 225, 313, 161, 228, - 161, 161, 259, 312, 161, 54, 86, 295, 310, 288, - 289, 8, 161, 292, 162, 37, 162, 162, 49, 269, - 162, 241, 242, 309, 179, 243, 180, 127, 162, 126, - 308, 223, 294, 256, 48, 222, 230, 232, 234, 235, - 236, 244, 246, 249, 250, 251, 252, 253, 257, 258, - 221, 169, 231, 233, 237, 238, 240, 247, 248, 157, - 158, 164, 254, 255, 168, 10, 182, 300, 55, 301, - 303, 47, 305, 46, 132, 79, 181, 183, 54, 306, - 307, 45, 134, 135, 136, 137, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 43, 59, 50, - 84, 9, 9, 121, 326, 78, 327, 130, 171, 121, - 83, 42, 131, 119, 335, 336, 337, 331, 185, 119, - 338, 261, 342, 341, 344, 343, 122, 117, 41, 177, - 350, 351, 122, 55, 176, 352, 53, 77, 40, 56, - 125, 
354, 22, 54, 84, 124, 172, 175, 51, 57, - 191, 353, 273, 85, 83, 189, 359, 224, 123, 80, - 345, 120, 81, 153, 58, 75, 227, 52, 116, 0, - 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, - 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 0, 0, 0, 13, 0, 0, 0, 24, 0, 30, - 0, 0, 31, 32, 55, 38, 105, 53, 77, 0, - 56, 275, 0, 22, 54, 0, 0, 0, 274, 0, - 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, - 281, 282, 287, 87, 89, 0, 75, 0, 0, 0, - 0, 0, 18, 19, 98, 99, 20, 0, 101, 102, - 104, 88, 76, 0, 0, 0, 0, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 0, 0, 0, 13, 103, 0, 0, 24, 0, - 30, 0, 55, 31, 32, 53, 77, 0, 56, 330, - 0, 22, 54, 0, 0, 0, 0, 0, 57, 0, - 278, 279, 277, 284, 286, 283, 285, 280, 281, 282, - 287, 0, 0, 0, 75, 0, 0, 0, 0, 0, - 18, 19, 0, 0, 20, 0, 0, 0, 17, 77, - 76, 0, 0, 0, 22, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, - 0, 0, 13, 0, 0, 0, 24, 0, 30, 0, - 0, 31, 32, 18, 19, 0, 0, 20, 0, 0, - 0, 17, 35, 0, 0, 0, 0, 22, 11, 12, - 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, - 33, 34, 0, 0, 0, 13, 0, 0, 0, 24, - 0, 30, 0, 0, 31, 32, 18, 19, 0, 0, - 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, - 27, 28, 29, 33, 34, 105, 0, 0, 13, 0, - 0, 0, 24, 174, 30, 0, 0, 31, 32, 0, - 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, - 0, 0, 87, 89, 90, 0, 91, 92, 93, 94, - 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, - 88, 87, 89, 90, 0, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 0, 101, 102, 104, 88, - 105, 0, 0, 0, 103, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 105, - 0, 0, 0, 103, 0, 0, 0, 87, 89, 90, - 0, 91, 92, 93, 0, 95, 96, 97, 98, 99, - 100, 0, 101, 102, 104, 88, 87, 89, 90, 0, - 91, 92, 0, 0, 95, 96, 0, 98, 99, 100, - 0, 101, 102, 104, 88, 0, 0, 0, 0, 103, + 155, 339, 337, 158, 344, 231, 39, 197, 281, 44, + 296, 295, 84, 120, 82, 181, 109, 108, 351, 352, + 353, 354, 107, 111, 203, 136, 204, 159, 154, 112, + 205, 206, 234, 6, 271, 55, 163, 163, 107, 334, + 333, 307, 244, 275, 309, 54, 162, 162, 250, 363, + 
91, 272, 330, 131, 362, 233, 60, 270, 276, 110, + 100, 101, 298, 115, 103, 116, 106, 90, 164, 164, + 114, 265, 113, 361, 277, 307, 360, 246, 247, 338, + 103, 248, 106, 153, 165, 165, 264, 316, 201, 261, + 122, 105, 235, 237, 239, 240, 241, 249, 251, 254, + 255, 256, 257, 258, 262, 263, 273, 105, 236, 238, + 242, 243, 245, 252, 253, 152, 117, 166, 259, 260, + 176, 164, 170, 173, 163, 168, 223, 169, 172, 2, + 3, 4, 5, 107, 162, 199, 111, 165, 187, 202, + 189, 171, 112, 269, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 200, + 89, 91, 113, 222, 123, 193, 268, 329, 224, 225, + 183, 100, 101, 191, 121, 103, 104, 106, 90, 7, + 85, 234, 266, 182, 55, 183, 328, 86, 192, 123, + 83, 244, 122, 267, 54, 132, 190, 250, 188, 121, + 345, 230, 105, 86, 233, 77, 35, 119, 304, 10, + 185, 327, 86, 303, 293, 294, 157, 315, 297, 79, + 184, 186, 326, 163, 274, 185, 246, 247, 302, 325, + 248, 324, 314, 162, 323, 184, 186, 299, 261, 313, + 322, 235, 237, 239, 240, 241, 249, 251, 254, 255, + 256, 257, 258, 262, 263, 164, 321, 236, 238, 242, + 243, 245, 252, 253, 180, 126, 320, 259, 260, 179, + 125, 165, 305, 319, 306, 308, 318, 310, 317, 130, + 88, 129, 178, 124, 311, 312, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 195, 160, 161, 50, 163, 36, 167, 198, 331, + 78, 332, 201, 228, 55, 162, 85, 227, 1, 340, + 341, 342, 336, 49, 54, 343, 83, 347, 346, 349, + 348, 48, 226, 47, 81, 355, 356, 164, 55, 86, + 357, 53, 77, 301, 56, 8, 359, 22, 54, 37, + 55, 175, 46, 165, 57, 128, 135, 127, 45, 43, + 54, 364, 300, 59, 133, 174, 9, 9, 42, 134, + 75, 41, 40, 51, 196, 358, 18, 19, 278, 87, + 20, 194, 229, 80, 350, 156, 76, 58, 232, 52, + 118, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 0, 0, 0, 13, 0, + 0, 0, 24, 0, 30, 0, 0, 31, 32, 55, + 38, 0, 53, 77, 0, 56, 280, 0, 22, 54, + 0, 0, 0, 279, 0, 57, 0, 283, 284, 282, + 289, 291, 288, 290, 285, 286, 287, 292, 0, 0, + 0, 75, 0, 0, 0, 0, 0, 18, 19, 0, + 0, 20, 0, 0, 0, 0, 0, 
76, 0, 0, + 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 0, 0, 0, 13, + 0, 0, 0, 24, 0, 30, 0, 55, 31, 32, + 53, 77, 0, 56, 335, 0, 22, 54, 0, 0, + 0, 0, 0, 57, 0, 283, 284, 282, 289, 291, + 288, 290, 285, 286, 287, 292, 0, 0, 0, 75, + 0, 0, 0, 0, 0, 18, 19, 0, 0, 20, + 0, 0, 0, 17, 77, 76, 0, 0, 0, 22, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 0, 0, 0, 13, 0, 0, + 0, 24, 0, 30, 0, 0, 31, 32, 18, 19, + 0, 0, 20, 0, 0, 0, 17, 35, 0, 0, + 0, 0, 22, 11, 12, 14, 15, 16, 21, 23, + 25, 26, 27, 28, 29, 33, 34, 0, 0, 0, + 13, 0, 0, 0, 24, 0, 30, 0, 0, 31, + 32, 18, 19, 0, 0, 20, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, + 16, 21, 23, 25, 26, 27, 28, 29, 33, 34, + 107, 0, 0, 13, 0, 0, 0, 24, 177, 30, + 0, 0, 31, 32, 0, 0, 0, 0, 0, 107, + 0, 0, 0, 0, 0, 0, 0, 89, 91, 92, + 0, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 0, 103, 104, 106, 90, 89, 91, 92, 0, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 0, 103, 104, 106, 90, 107, 0, 0, 0, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 103, + 0, 0, 0, 0, 107, 0, 0, 0, 105, 0, + 0, 0, 89, 91, 92, 0, 93, 94, 95, 0, + 97, 98, 99, 100, 101, 102, 0, 103, 104, 106, + 90, 89, 91, 92, 0, 93, 94, 0, 0, 97, + 98, 0, 100, 101, 102, 0, 103, 104, 106, 90, + 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 105, } var yyPact = [...]int16{ - 28, 102, 569, 569, 405, 526, -1000, -1000, -1000, 98, + 31, 169, 574, 574, 410, 531, -1000, -1000, -1000, 193, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 342, -1000, 204, -1000, 650, + -1000, -1000, -1000, -1000, -1000, 314, -1000, 278, -1000, 655, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 21, 93, -1000, -1000, 483, -1000, 483, 97, + -1000, -1000, 57, 147, -1000, -1000, 488, -1000, 488, 192, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 307, -1000, -1000, - 338, -1000, -1000, 225, -1000, 23, -1000, -44, -44, -44, - -44, -44, -44, -44, -44, -44, -44, -44, -44, -44, - -44, -44, -44, 47, 171, 259, 93, -57, -1000, 249, - 249, 324, -1000, 631, 75, -1000, 327, -1000, -1000, 222, - 130, -1000, -1000, -1000, 298, -1000, 112, -1000, 159, 483, - -1000, -58, -48, -1000, 483, 483, 483, 483, 483, 483, - 483, 483, 483, 483, 483, 483, 483, 483, 483, -1000, - 39, -1000, -1000, 90, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 36, 36, 229, -1000, -1000, -1000, -1000, 174, -1000, - -1000, 180, -1000, 650, -1000, -1000, 301, -1000, 105, -1000, - -1000, -1000, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, - 18, 157, 83, -1000, -1000, -1000, 404, 15, 249, 249, - 249, 249, 75, 75, 402, 402, 402, 715, 696, 402, - 402, 715, 75, 75, 402, 75, 15, -1000, 19, -1000, - -1000, -1000, 186, -1000, 155, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 187, -1000, -1000, + 263, -1000, -1000, 353, 277, -1000, -1000, 29, -1000, -53, + -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, + -53, -53, -53, -53, -53, 26, 214, 305, 147, -56, + -1000, 126, 126, 329, -1000, 636, 24, -1000, 262, -1000, + -1000, 181, 166, -1000, -1000, 178, -1000, 171, -1000, 163, + -1000, 296, 488, -1000, -58, -50, -1000, 488, 488, 488, + 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, + 488, 488, -1000, 175, -1000, -1000, 111, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 115, 115, 311, -1000, -1000, -1000, + -1000, 179, -1000, -1000, 64, -1000, 655, -1000, -1000, 162, + -1000, 141, -1000, -1000, -1000, -1000, -1000, 32, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 25, 80, 17, -1000, -1000, + -1000, 409, 8, 126, 126, 126, 126, 24, 24, 119, + 119, 119, 720, 701, 119, 119, 720, 24, 24, 119, + 24, 8, -1000, 40, -1000, -1000, -1000, 341, -1000, 206, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 483, -1000, -1000, -1000, -1000, -1000, -1000, 31, 31, 17, - 31, 37, 37, 206, 34, -1000, -1000, 197, 191, 188, - 185, 164, 161, 153, 133, 113, 111, 110, -1000, -1000, - -1000, -1000, -1000, -1000, 101, -1000, -1000, -1000, 13, -1000, - 650, -1000, -1000, -1000, 31, -1000, 16, 11, 482, -1000, - -1000, -1000, 33, 163, 163, 163, 36, 40, 40, 33, - 40, 33, -74, -1000, -1000, -1000, -1000, -1000, 31, 31, - -1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, -1000, -1000, - 163, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 38, -1000, 160, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 488, -1000, -1000, -1000, -1000, + -1000, -1000, 56, 56, 18, 56, 72, 72, 215, 70, + -1000, -1000, 272, 270, 267, 260, 250, 234, 228, 225, + 223, 216, 205, -1000, -1000, -1000, -1000, -1000, -1000, 165, + -1000, -1000, -1000, 30, -1000, 655, -1000, -1000, -1000, 56, + -1000, 14, 13, 487, -1000, -1000, -1000, 22, 27, 27, + 27, 115, 186, 186, 22, 186, 22, -74, -1000, -1000, + -1000, -1000, -1000, 56, 56, -1000, -1000, -1000, 56, -1000, + -1000, -1000, -1000, -1000, -1000, 27, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 52, -1000, + 28, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 368, 12, 367, 5, 14, 366, 298, 364, 363, - 361, 360, 265, 211, 359, 13, 357, 10, 11, 355, - 353, 7, 352, 8, 4, 351, 2, 1, 3, 350, - 27, 0, 348, 338, 17, 193, 328, 312, 6, 311, - 308, 16, 307, 39, 297, 9, 281, 274, 273, 271, - 234, 218, 299, 163, 161, + 0, 390, 13, 389, 5, 15, 388, 363, 387, 385, + 12, 384, 209, 345, 383, 14, 382, 10, 11, 381, + 379, 7, 378, 8, 4, 375, 2, 1, 3, 374, + 27, 0, 373, 372, 17, 195, 371, 369, 6, 368, + 365, 16, 364, 56, 359, 9, 358, 356, 352, 333, + 331, 323, 304, 318, 306, } var yyR1 = [...]int8{ @@ -554,18 +556,18 @@ var yyR1 = [...]int8{ 13, 13, 7, 7, 7, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, - 14, 14, 14, 15, 15, 15, 15, 54, 20, 20, - 20, 20, 19, 19, 19, 19, 19, 19, 19, 19, - 19, 29, 29, 29, 21, 21, 21, 21, 22, 22, - 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 24, 24, 25, 25, 25, 11, 11, 11, - 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, + 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, + 54, 20, 20, 20, 20, 19, 19, 19, 19, 19, + 19, 19, 19, 19, 29, 29, 29, 21, 21, 21, + 21, 22, 22, 22, 23, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 24, 24, 25, 25, 25, + 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, - 45, 45, 28, 28, 30, 30, 31, 31, 27, 26, - 26, 49, 10, 18, 18, + 6, 6, 6, 6, 6, 6, 6, 8, 8, 5, + 5, 5, 5, 45, 45, 28, 28, 30, 30, 31, + 31, 27, 26, 26, 49, 10, 18, 18, } var yyR2 = [...]int8{ @@ -582,18 +584,18 @@ var yyR2 = [...]int8{ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, - 3, 1, 2, 3, 3, 2, 1, 2, 0, 3, - 2, 1, 1, 3, 1, 3, 4, 1, 3, 5, - 5, 1, 1, 1, 4, 3, 3, 2, 3, 1, - 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, + 3, 1, 2, 3, 3, 3, 3, 2, 2, 1, + 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, + 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, + 2, 3, 1, 2, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, - 1, 1, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, + 1, 1, 2, 1, 1, 1, 0, 1, } var yyChk = [...]int16{ @@ -605,34 +607,35 @@ var yyChk = [...]int16{ -52, -32, -3, 12, 19, 9, 15, 25, -8, -7, -43, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 41, 57, 13, -52, -12, - -14, 20, -15, 12, 2, -20, 2, 41, 59, 42, - 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 56, 57, 83, 58, 14, -34, -41, 2, 79, - 85, 15, -41, -38, -38, -43, -1, 20, -2, 12, - 
-10, 2, 25, 20, 7, 2, 4, 2, 24, -35, - -42, -37, -47, 78, -35, -35, -35, -35, -35, -35, - -35, -35, -35, -35, -35, -35, -35, -35, -35, -45, - 57, 2, -31, -9, 2, -28, -30, 88, 89, 19, - 9, 41, 57, -45, 2, -41, -34, -17, 15, 2, - -17, -40, 22, -38, 22, 20, 7, 2, -5, 2, - 4, 54, 44, 55, -5, 20, -15, 25, 2, -19, - 5, -29, -21, 12, -28, -30, 16, -38, 82, 84, - 80, 81, -38, -38, -38, -38, -38, -38, -38, -38, - -38, -38, -38, -38, -38, -38, -38, -45, 15, -28, - -28, 21, 6, 2, -16, 22, -4, -6, 25, 2, - 62, 78, 63, 79, 64, 65, 66, 80, 81, 12, - 82, 47, 48, 51, 67, 18, 68, 83, 84, 69, - 70, 71, 72, 73, 88, 89, 59, 74, 75, 22, - 7, 20, -2, 25, 2, 25, 2, 26, 26, -30, - 26, 41, 57, -22, 24, 17, -23, 30, 28, 29, - 35, 36, 37, 33, 31, 34, 32, 38, -17, -17, - -18, -17, -18, 22, -45, 21, 2, 22, 7, 2, - -38, -27, 19, -27, 26, -27, -21, -21, 24, 17, - 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 21, 2, 22, -4, -27, 26, 26, - 17, -23, -26, 57, -27, -31, -31, -31, -28, -24, - 14, -24, -26, -24, -26, -11, 92, 93, 94, 95, - -27, -27, -27, -25, -31, 24, 21, 2, 21, -31, + -14, 20, -15, 12, -10, 2, 25, -20, 2, 41, + 59, 42, 43, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 56, 57, 83, 58, 14, -34, -41, + 2, 79, 85, 15, -41, -38, -38, -43, -1, 20, + -2, 12, -10, 2, 20, 7, 2, 4, 2, 4, + 2, 24, -35, -42, -37, -47, 78, -35, -35, -35, + -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, + -35, -35, -45, 57, 2, -31, -9, 2, -28, -30, + 88, 89, 19, 9, 41, 57, -45, 2, -41, -34, + -17, 15, 2, -17, -40, 22, -38, 22, 20, 7, + 2, -5, 2, 4, 54, 44, 55, -5, 20, -15, + 25, 2, 25, 2, -19, 5, -29, -21, 12, -28, + -30, 16, -38, 82, 84, 80, 81, -38, -38, -38, + -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, + -38, -38, -45, 15, -28, -28, 21, 6, 2, -16, + 22, -4, -6, 25, 2, 62, 78, 63, 79, 64, + 65, 66, 80, 81, 12, 82, 47, 48, 51, 67, + 18, 68, 83, 84, 69, 70, 71, 72, 73, 88, + 89, 59, 74, 75, 22, 7, 20, -2, 25, 2, + 25, 2, 26, 26, -30, 26, 41, 57, -22, 24, + 17, -23, 30, 28, 29, 35, 36, 37, 33, 31, + 34, 32, 38, 
-17, -17, -18, -17, -18, 22, -45, + 21, 2, 22, 7, 2, -38, -27, 19, -27, 26, + -27, -21, -21, 24, 17, 2, 17, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 21, 2, + 22, -4, -27, 26, 26, 17, -23, -26, 57, -27, + -31, -31, -31, -28, -24, 14, -24, -26, -24, -26, + -11, 92, 93, 94, 95, -27, -27, -27, -25, -31, + 24, 21, 2, 21, -31, } var yyDef = [...]int16{ @@ -641,37 +644,38 @@ var yyDef = [...]int16{ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 108, 230, 231, 0, 241, 0, 85, + 18, 19, 0, 108, 233, 234, 0, 244, 0, 85, 86, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, 224, 225, 0, 5, 100, - 0, 128, 131, 0, 136, 137, 141, 43, 43, 43, + -2, -2, -2, -2, -2, 227, 228, 0, 5, 100, + 0, 128, 131, 0, 0, 139, 245, 140, 144, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, - 0, 0, 61, 0, 83, 84, 0, 89, 91, 0, - 95, 99, 242, 126, 0, 132, 0, 135, 140, 0, - 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, - 0, 70, 71, 0, 73, 236, 237, 74, 75, 232, - 233, 0, 0, 0, 82, 20, 21, 24, 0, 54, - 25, 0, 63, 65, 67, 87, 0, 92, 0, 98, - 226, 227, 228, 229, 0, 127, 130, 133, 134, 139, - 142, 144, 147, 151, 152, 153, 0, 26, 0, 0, - -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 69, 0, 234, - 235, 76, 0, 81, 0, 53, 56, 58, 59, 60, - 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, - 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, - 215, 216, 217, 218, 219, 220, 221, 222, 223, 62, - 66, 88, 90, 93, 97, 94, 96, 0, 0, 0, - 0, 0, 0, 0, 0, 157, 159, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, - 49, 244, 50, 72, 0, 78, 80, 51, 0, 57, - 64, 143, 238, 145, 0, 148, 0, 0, 0, 155, - 160, 156, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 77, 79, 52, 55, 146, 0, 0, - 154, 158, 161, 0, 240, 162, 163, 164, 165, 166, - 0, 167, 168, 169, 170, 171, 177, 178, 179, 180, - 149, 150, 239, 0, 175, 0, 173, 176, 172, 174, + 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, + 
23, 0, 0, 0, 61, 0, 83, 84, 0, 89, + 91, 0, 95, 99, 126, 0, 132, 0, 137, 0, + 138, 143, 0, 42, 47, 48, 44, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 68, 0, 70, 71, 0, 73, 239, 240, + 74, 75, 235, 236, 0, 0, 0, 82, 20, 21, + 24, 0, 54, 25, 0, 63, 65, 67, 87, 0, + 92, 0, 98, 229, 230, 231, 232, 0, 127, 130, + 133, 135, 134, 136, 142, 145, 147, 150, 154, 155, + 156, 0, 26, 0, 0, -2, -2, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 69, 0, 237, 238, 76, 0, 81, 0, + 53, 56, 58, 59, 60, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 62, 66, 88, 90, 93, 97, + 94, 96, 0, 0, 0, 0, 0, 0, 0, 0, + 160, 162, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 45, 46, 49, 247, 50, 72, 0, + 78, 80, 51, 0, 57, 64, 146, 241, 148, 0, + 151, 0, 0, 0, 158, 163, 159, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 77, 79, + 52, 55, 149, 0, 0, 157, 161, 164, 0, 243, + 165, 166, 167, 168, 169, 0, 170, 171, 172, 173, + 174, 180, 181, 182, 183, 152, 153, 242, 0, 178, + 0, 176, 179, 175, 177, } var yyTok1 = [...]int8{ @@ -1614,24 +1618,41 @@ yydefault: yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 134: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} + } + case 135: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 135: + case 136: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).unexpected("label set", "string") + yyVAL.label = labels.Label{} + } + case 137: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 136: + case 138: + yyDollar = yyS[yypt-2 : yypt+1] + { + yylex.(*parser).unexpected("label set", "\"=\"") + yyVAL.label = labels.Label{} + } + case 139: yyDollar 
= yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 137: + case 140: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1639,33 +1660,33 @@ yydefault: values: yyDollar[2].series, } } - case 138: + case 141: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 139: + case 142: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) } - case 140: + case 143: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 141: + case 144: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 142: + case 145: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 143: + case 146: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1673,12 +1694,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 144: + case 147: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 145: + case 148: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1687,7 +1708,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 146: + case 149: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1697,12 +1718,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 147: + case 150: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 148: + case 151: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1712,7 +1733,7 @@ yydefault: //$1 += $2 } } - case 149: + case 152: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1721,7 +1742,7 
@@ yydefault: } yyVAL.series = val } - case 150: + case 153: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1730,7 +1751,7 @@ yydefault: } yyVAL.series = val } - case 151: + case 154: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1738,130 +1759,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 154: + case 157: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 155: + case 158: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } - case 156: - yyDollar = yyS[yypt-3 : yypt+1] - { - m := yylex.(*parser).newMap() - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) - } - case 157: - yyDollar = yyS[yypt-2 : yypt+1] - { - m := yylex.(*parser).newMap() - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) - } - case 158: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) - } case 159: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.descriptors = yyDollar[1].descriptors + m := yylex.(*parser).newMap() + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 160: yyDollar = yyS[yypt-2 : yypt+1] { - yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") + m := yylex.(*parser).newMap() + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 161: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["schema"] = yyDollar[3].int + yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } case 162: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-1 : yypt+1] { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["sum"] = yyDollar[3].float + yyVAL.descriptors = yyDollar[1].descriptors } case 163: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-2 : yypt+1] { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["count"] = yyDollar[3].float + yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]") } case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket"] = yyDollar[3].float + yyVAL.descriptors["schema"] = yyDollar[3].int } case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float + yyVAL.descriptors["sum"] = yyDollar[3].float } case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set + yyVAL.descriptors["count"] = yyDollar[3].float } case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 169: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + 
yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 171: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item + yyVAL.descriptors["offset"] = yyDollar[3].int } case 172: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 173: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 174: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } case 175: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.bucket_set = yyDollar[2].bucket_set + } + case 176: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = yyDollar[2].bucket_set + } + case 177: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + } + case 178: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 230: + case 233: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1869,7 +1890,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 231: + case 234: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1883,12 +1904,12 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 232: + case 235: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 233: + case 236: yyDollar = 
yyS[yypt-1 : yypt+1] { var err error @@ -1899,17 +1920,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 234: + case 237: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 235: + case 238: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 238: + case 241: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1918,17 +1939,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 239: + case 242: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 240: + case 243: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 241: + case 244: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1936,7 +1957,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 242: + case 245: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1945,7 +1966,7 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 243: + case 246: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/web/api/v1/api.go b/web/api/v1/api.go index b37605f5d5..c61e7984fb 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -744,7 +744,8 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { ctx := r.Context() name := route.Param(ctx, "name") - if !model.LabelNameRE.MatchString(name) { + label := model.LabelName(name) + if !label.IsValid() { return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}, nil, nil} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 35ad4a9ad3..4cf5ec36ec 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -387,6 +387,7 @@ func TestEndpoints(t *testing.T) { test_metric4{foo="bar", dup="1"} 1+0x100 test_metric4{foo="boo", dup="1"} 1+0x100 test_metric4{foo="boo"} 1+0x100 + test_metric5{"host.name"="localhost"} 1+0x100 `) t.Cleanup(func() 
{ storage.Close() }) @@ -1117,6 +1118,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata []targetMetadata exemplars []exemplar.QueryResult zeroFunc func(interface{}) + nameValidationScheme model.ValidationScheme } rulesZeroFunc := func(i interface{}) { @@ -2996,6 +2998,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "test_metric2", "test_metric3", "test_metric4", + "test_metric5", }, }, { @@ -3008,13 +3011,25 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "boo", }, }, - // Bad name parameter. + // Bad name parameter for legacy validation. { endpoint: api.labelValues, params: map[string]string{ - "name": "not!!!allowed", + "name": "host.name", + }, + nameValidationScheme: model.LegacyValidation, + errType: errorBadData, + }, + // Valid utf8 name parameter for utf8 validation. + { + endpoint: api.labelValues, + params: map[string]string{ + "name": "host.name", + }, + nameValidationScheme: model.UTF8Validation, + response: []string{ + "localhost", }, - errType: errorBadData, }, // Start and end before LabelValues starts. { @@ -3250,15 +3265,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "name": "__name__", }, query: url.Values{ - "limit": []string{"4"}, + "limit": []string{"5"}, }, - responseLen: 4, // API does not specify which particular values will come back. + responseLen: 5, // API does not specify which particular values will come back. warningsCount: 0, // No warnings if limit isn't exceeded. }, // Label names. { endpoint: api.labelNames, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Start and end before Label names starts. 
{ @@ -3276,7 +3291,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"1"}, "end": []string{"100"}, }, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Start before Label names, end within Label names. { @@ -3285,7 +3300,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"-1"}, "end": []string{"10"}, }, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Start before Label names starts, end after Label names ends. @@ -3295,7 +3310,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"-1"}, "end": []string{"100000"}, }, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Start with bad data for Label names, end within Label names. { @@ -3313,7 +3328,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"1"}, "end": []string{"1000000006"}, }, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Start and end after Label names ends. { @@ -3330,7 +3345,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "start": []string{"4"}, }, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Only provide End within Label names, don't provide a start time. { @@ -3338,7 +3353,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "end": []string{"20"}, }, - response: []string{"__name__", "dup", "foo"}, + response: []string{"__name__", "dup", "foo", "host.name"}, }, // Label names with bad matchers. 
{ @@ -3406,9 +3421,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E { endpoint: api.labelNames, query: url.Values{ - "limit": []string{"3"}, + "limit": []string{"4"}, }, - responseLen: 3, // API does not specify which particular values will come back. + responseLen: 4, // API does not specify which particular values will come back. warningsCount: 0, // No warnings if limit isn't exceeded. }, }...) @@ -3444,6 +3459,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E ctx = route.WithParam(ctx, p, v) } + model.NameValidationScheme = test.nameValidationScheme + req, err := request(method, test.query) require.NoError(t, err) From a778a58bdf12d08fe5bbaab86a128b31f2c6e100 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 05:20:09 +0000 Subject: [PATCH 0966/1397] chore(deps-dev): bump rollup from 4.21.2 to 4.27.4 in /web/ui Bumps [rollup](https://github.com/rollup/rollup) from 4.21.2 to 4.27.4. - [Release notes](https://github.com/rollup/rollup/releases) - [Changelog](https://github.com/rollup/rollup/blob/master/CHANGELOG.md) - [Commits](https://github.com/rollup/rollup/compare/v4.21.2...v4.27.4) --- updated-dependencies: - dependency-name: rollup dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 196 ++++++++++++++++++++------------------- 1 file changed, 100 insertions(+), 96 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f5ae7642b0..8a0df78d5b 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -85,12 +85,6 @@ "vitest": "^2.1.1" } }, - "mantine-ui/node_modules/@types/estree": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", - "dev": true - }, "mantine-ui/node_modules/eslint": { "version": "9.11.1", "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.11.1.tgz", @@ -2442,224 +2436,234 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.21.2.tgz", - "integrity": "sha512-fSuPrt0ZO8uXeS+xP3b+yYTCBUd05MoSp2N/MFOgjhhUhMmchXlpTQrTpI8T+YAwAQuK7MafsCOxW7VrPMrJcg==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.27.4.tgz", + "integrity": "sha512-2Y3JT6f5MrQkICUyRVCw4oa0sutfAsgaSsb0Lmmy1Wi2y7X5vT9Euqw4gOsCyy0YfKURBg35nhUKZS4mDcfULw==", "cpu": [ "arm" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "android" ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.21.2.tgz", - "integrity": "sha512-xGU5ZQmPlsjQS6tzTTGwMsnKUtu0WVbl0hYpTPauvbRAnmIvpInhJtgjj3mcuJpEiuUw4v1s4BimkdfDWlh7gA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.27.4.tgz", + "integrity": "sha512-wzKRQXISyi9UdCVRqEd0H4cMpzvHYt1f/C3CoIjES6cG++RHKhrBj2+29nPF0IB5kpy9MS71vs07fvrNGAl/iA==", "cpu": [ 
"arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "android" ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.21.2.tgz", - "integrity": "sha512-99AhQ3/ZMxU7jw34Sq8brzXqWH/bMnf7ZVhvLk9QU2cOepbQSVTns6qoErJmSiAvU3InRqC2RRZ5ovh1KN0d0Q==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.27.4.tgz", + "integrity": "sha512-PlNiRQapift4LNS8DPUHuDX/IdXiLjf8mc5vdEmUR0fF/pyy2qWwzdLjB+iZquGr8LuN4LnUoSEvKRwjSVYz3Q==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "darwin" ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.21.2.tgz", - "integrity": "sha512-ZbRaUvw2iN/y37x6dY50D8m2BnDbBjlnMPotDi/qITMJ4sIxNY33HArjikDyakhSv0+ybdUxhWxE6kTI4oX26w==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.27.4.tgz", + "integrity": "sha512-o9bH2dbdgBDJaXWJCDTNDYa171ACUdzpxSZt+u/AAeQ20Nk5x+IhA+zsGmrQtpkLiumRJEYef68gcpn2ooXhSQ==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "darwin" ] }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.27.4.tgz", + "integrity": "sha512-NBI2/i2hT9Q+HySSHTBh52da7isru4aAAo6qC3I7QFVsuhxi2gM8t/EI9EVcILiHLj1vfi+VGGPaLOUENn7pmw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.27.4.tgz", + "integrity": "sha512-wYcC5ycW2zvqtDYrE7deary2P2UFmSh85PUpAx+dwTCO9uw3sgzD6Gv9n5X4vLaQKsrfTSZZ7Z7uynQozPVvWA==", + "cpu": 
[ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ] + }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.21.2.tgz", - "integrity": "sha512-ztRJJMiE8nnU1YFcdbd9BcH6bGWG1z+jP+IPW2oDUAPxPjo9dverIOyXz76m6IPA6udEL12reYeLojzW2cYL7w==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.27.4.tgz", + "integrity": "sha512-9OwUnK/xKw6DyRlgx8UizeqRFOfi9mf5TYCw1uolDaJSbUmBxP85DE6T4ouCMoN6pXw8ZoTeZCSEfSaYo+/s1w==", "cpu": [ "arm" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.21.2.tgz", - "integrity": "sha512-flOcGHDZajGKYpLV0JNc0VFH361M7rnV1ee+NTeC/BQQ1/0pllYcFmxpagltANYt8FYf9+kL6RSk80Ziwyhr7w==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.27.4.tgz", + "integrity": "sha512-Vgdo4fpuphS9V24WOV+KwkCVJ72u7idTgQaBoLRD0UxBAWTF9GWurJO9YD9yh00BzbkhpeXtm6na+MvJU7Z73A==", "cpu": [ "arm" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.21.2.tgz", - "integrity": "sha512-69CF19Kp3TdMopyteO/LJbWufOzqqXzkrv4L2sP8kfMaAQ6iwky7NoXTp7bD6/irKgknDKM0P9E/1l5XxVQAhw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.27.4.tgz", + "integrity": "sha512-pleyNgyd1kkBkw2kOqlBx+0atfIIkkExOTiifoODo6qKDSpnc6WzUY5RhHdmTdIJXBdSnh6JknnYTtmQyobrVg==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", 
"optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.21.2.tgz", - "integrity": "sha512-48pD/fJkTiHAZTnZwR0VzHrao70/4MlzJrq0ZsILjLW/Ab/1XlVUStYyGt7tdyIiVSlGZbnliqmult/QGA2O2w==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.27.4.tgz", + "integrity": "sha512-caluiUXvUuVyCHr5DxL8ohaaFFzPGmgmMvwmqAITMpV/Q+tPoaHZ/PWa3t8B2WyoRcIIuu1hkaW5KkeTDNSnMA==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.21.2.tgz", - "integrity": "sha512-cZdyuInj0ofc7mAQpKcPR2a2iu4YM4FQfuUzCVA2u4HI95lCwzjoPtdWjdpDKyHxI0UO82bLDoOaLfpZ/wviyQ==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.27.4.tgz", + "integrity": "sha512-FScrpHrO60hARyHh7s1zHE97u0KlT/RECzCKAdmI+LEoC1eDh/RDji9JgFqyO+wPDb86Oa/sXkily1+oi4FzJQ==", "cpu": [ "ppc64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.21.2.tgz", - "integrity": "sha512-RL56JMT6NwQ0lXIQmMIWr1SW28z4E4pOhRRNqwWZeXpRlykRIlEpSWdsgNWJbYBEWD84eocjSGDu/XxbYeCmwg==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.27.4.tgz", + "integrity": "sha512-qyyprhyGb7+RBfMPeww9FlHwKkCXdKHeGgSqmIXw9VSUtvyFZ6WZRtnxgbuz76FK7LyoN8t/eINRbPUcvXB5fw==", "cpu": [ "riscv64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, 
"node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.21.2.tgz", - "integrity": "sha512-PMxkrWS9z38bCr3rWvDFVGD6sFeZJw4iQlhrup7ReGmfn7Oukrr/zweLhYX6v2/8J6Cep9IEA/SmjXjCmSbrMQ==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.27.4.tgz", + "integrity": "sha512-PFz+y2kb6tbh7m3A7nA9++eInGcDVZUACulf/KzDtovvdTizHpZaJty7Gp0lFwSQcrnebHOqxF1MaKZd7psVRg==", "cpu": [ "s390x" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.21.2.tgz", - "integrity": "sha512-B90tYAUoLhU22olrafY3JQCFLnT3NglazdwkHyxNDYF/zAxJt5fJUB/yBoWFoIQ7SQj+KLe3iL4BhOMa9fzgpw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.27.4.tgz", + "integrity": "sha512-Ni8mMtfo+o/G7DVtweXXV/Ol2TFf63KYjTtoZ5f078AUgJTmaIJnj4JFU7TK/9SVWTaSJGxPi5zMDgK4w+Ez7Q==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.21.2.tgz", - "integrity": "sha512-7twFizNXudESmC9oneLGIUmoHiiLppz/Xs5uJQ4ShvE6234K0VB1/aJYU3f/4g7PhssLGKBVCC37uRkkOi8wjg==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.27.4.tgz", + "integrity": "sha512-5AeeAF1PB9TUzD+3cROzFTnAJAcVUGLuR8ng0E0WXGkYhp6RD6L+6szYVX+64Rs0r72019KHZS1ka1q+zU/wUw==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.21.2", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.21.2.tgz", - "integrity": "sha512-9rRero0E7qTeYf6+rFh3AErTNU1VCQg2mn7CQcI44vNUWM9Ze7MSRS/9RFuSsox+vstRt97+x3sOhEey024FRQ==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.27.4.tgz", + "integrity": "sha512-yOpVsA4K5qVwu2CaS3hHxluWIK5HQTjNV4tWjQXluMiiiu4pJj4BN98CvxohNCpcjMeTXk/ZMJBRbgRg8HBB6A==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "win32" ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.21.2.tgz", - "integrity": "sha512-5rA4vjlqgrpbFVVHX3qkrCo/fZTj1q0Xxpg+Z7yIo3J2AilW7t2+n6Q8Jrx+4MrYpAnjttTYF8rr7bP46BPzRw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.27.4.tgz", + "integrity": "sha512-KtwEJOaHAVJlxV92rNYiG9JQwQAdhBlrjNRp7P9L8Cb4Rer3in+0A+IPhJC9y68WAi9H0sX4AiG2NTsVlmqJeQ==", "cpu": [ "ia32" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "win32" ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.21.2.tgz", - "integrity": "sha512-6UUxd0+SKomjdzuAcp+HAmxw1FlGBnl1v2yEPSabtx4lBfdXHDVsW7+lQkgz9cNFJGY3AWR7+V8P5BqkD9L9nA==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.27.4.tgz", + "integrity": "sha512-3j4jx1TppORdTAoBJRd+/wJRGCPC0ETWkXOecJ6PPZLj6SptXkrXcNqdj0oclbKML6FkQltdz7bBA3rUSirZug==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "win32" @@ -2881,11 +2885,10 @@ } }, "node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": 
"sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", - "dev": true, - "license": "MIT" + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "dev": true }, "node_modules/@types/graceful-fs": { "version": "4.1.9", @@ -8306,13 +8309,12 @@ } }, "node_modules/rollup": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.21.2.tgz", - "integrity": "sha512-e3TapAgYf9xjdLvKQCkQTnbTKd4a6jwlpQSJJFokHGaX2IVjoEqkIIhiQfqsi0cdwlOD+tQGuOd5AJkc5RngBw==", + "version": "4.27.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.27.4.tgz", + "integrity": "sha512-RLKxqHEMjh/RGLsDxAEsaLO3mWgyoU6x9w6n1ikAzet4B3gI2/3yP6PWY2p9QzRTh6MfEIXB3MwsOY0Iv3vNrw==", "dev": true, - "license": "MIT", "dependencies": { - "@types/estree": "1.0.5" + "@types/estree": "1.0.6" }, "bin": { "rollup": "dist/bin/rollup" @@ -8322,22 +8324,24 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.21.2", - "@rollup/rollup-android-arm64": "4.21.2", - "@rollup/rollup-darwin-arm64": "4.21.2", - "@rollup/rollup-darwin-x64": "4.21.2", - "@rollup/rollup-linux-arm-gnueabihf": "4.21.2", - "@rollup/rollup-linux-arm-musleabihf": "4.21.2", - "@rollup/rollup-linux-arm64-gnu": "4.21.2", - "@rollup/rollup-linux-arm64-musl": "4.21.2", - "@rollup/rollup-linux-powerpc64le-gnu": "4.21.2", - "@rollup/rollup-linux-riscv64-gnu": "4.21.2", - "@rollup/rollup-linux-s390x-gnu": "4.21.2", - "@rollup/rollup-linux-x64-gnu": "4.21.2", - "@rollup/rollup-linux-x64-musl": "4.21.2", - "@rollup/rollup-win32-arm64-msvc": "4.21.2", - "@rollup/rollup-win32-ia32-msvc": "4.21.2", - "@rollup/rollup-win32-x64-msvc": "4.21.2", + "@rollup/rollup-android-arm-eabi": "4.27.4", + "@rollup/rollup-android-arm64": "4.27.4", + "@rollup/rollup-darwin-arm64": "4.27.4", + 
"@rollup/rollup-darwin-x64": "4.27.4", + "@rollup/rollup-freebsd-arm64": "4.27.4", + "@rollup/rollup-freebsd-x64": "4.27.4", + "@rollup/rollup-linux-arm-gnueabihf": "4.27.4", + "@rollup/rollup-linux-arm-musleabihf": "4.27.4", + "@rollup/rollup-linux-arm64-gnu": "4.27.4", + "@rollup/rollup-linux-arm64-musl": "4.27.4", + "@rollup/rollup-linux-powerpc64le-gnu": "4.27.4", + "@rollup/rollup-linux-riscv64-gnu": "4.27.4", + "@rollup/rollup-linux-s390x-gnu": "4.27.4", + "@rollup/rollup-linux-x64-gnu": "4.27.4", + "@rollup/rollup-linux-x64-musl": "4.27.4", + "@rollup/rollup-win32-arm64-msvc": "4.27.4", + "@rollup/rollup-win32-ia32-msvc": "4.27.4", + "@rollup/rollup-win32-x64-msvc": "4.27.4", "fsevents": "~2.3.2" } }, From 1939123b5b1c90fc690245ad51cd2fb2f1b28eba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 07:25:28 +0100 Subject: [PATCH 0967/1397] chore(deps): bump braces from 3.0.2 to 3.0.3 in /web/ui/react-app (#15457) Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3. - [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) --- updated-dependencies: - dependency-name: braces dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index bdc2d72bee..bce699f156 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -6798,11 +6798,11 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -10031,9 +10031,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dependencies": { "to-regex-range": "^5.0.1" }, From 1bd16db101933f70308beb5b85e7fa3968a5228c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 07:26:06 +0100 Subject: [PATCH 0968/1397] chore(deps): bump body-parser and express in /web/ui/react-app (#15460) Bumps [body-parser](https://github.com/expressjs/body-parser) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. 
Updates `body-parser` from 1.20.2 to 1.20.3 - [Release notes](https://github.com/expressjs/body-parser/releases) - [Changelog](https://github.com/expressjs/body-parser/blob/master/HISTORY.md) - [Commits](https://github.com/expressjs/body-parser/compare/1.20.2...1.20.3) Updates `express` from 4.19.2 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.19.2...4.21.1) --- updated-dependencies: - dependency-name: body-parser dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 102 ++++++++++++++++------------- 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index bce699f156..b76f23d21c 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -6693,9 +6693,9 @@ "dev": true }, "node_modules/body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "dev": true, "dependencies": { "bytes": "3.1.2", @@ -6706,7 +6706,7 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" @@ -7386,9 +7386,9 @@ "dev": true }, "node_modules/cookie": { - "version": "0.6.0", - "resolved": 
"https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", "dev": true, "engines": { "node": ">= 0.6" @@ -8624,9 +8624,9 @@ } }, "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", "dev": true, "engines": { "node": ">= 0.8" @@ -9827,37 +9827,37 @@ } }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.21.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", + "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", "dev": true, "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.6.0", + "cookie": "0.7.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.2.0", + "finalhandler": "1.3.1", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", 
"on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", - "qs": "6.11.0", + "qs": "6.13.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.2", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -9884,9 +9884,9 @@ "dev": true }, "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==", + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==", "dev": true }, "node_modules/fast-deep-equal": { @@ -10042,13 +10042,13 @@ } }, "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", "dev": true, "dependencies": { "debug": "2.6.9", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "on-finished": "2.4.1", "parseurl": "~1.3.3", @@ -15808,10 +15808,13 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==", - "dev": true + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/merge-stream": { "version": "2.0.0", @@ -18491,12 +18494,12 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", "dev": true, "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": ">=0.6" @@ -21007,9 +21010,9 @@ "dev": true }, "node_modules/send": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", "dev": true, "dependencies": { "debug": "2.6.9", @@ -21045,6 +21048,15 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "dev": true }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "dev": true, + "engines": { + "node": ">= 0.8" + } + }, "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -21139,15 +21151,15 @@ } }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", "dev": true, "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" From 8e7e7a040690ca278d268d4aba6ac9062f9edaa4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 06:37:30 +0000 Subject: [PATCH 0969/1397] chore(deps): bump micromatch from 4.0.5 to 4.0.8 in /web/ui/react-app (#15462) Bumps [micromatch](https://github.com/micromatch/micromatch) from 4.0.5 to 4.0.8. - [Release notes](https://github.com/micromatch/micromatch/releases) - [Changelog](https://github.com/micromatch/micromatch/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/micromatch/compare/4.0.5...4.0.8) --- updated-dependencies: - dependency-name: micromatch dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index b76f23d21c..662e233129 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -15841,11 +15841,11 @@ } }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { From c08edd3ff3836e28b8d6d9669303f9dbf1288250 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 08:00:25 +0100 Subject: [PATCH 0970/1397] chore(deps-dev): bump rollup from 2.79.1 to 2.79.2 in /web/ui/react-app (#15461) Bumps [rollup](https://github.com/rollup/rollup) from 2.79.1 to 2.79.2. - [Release notes](https://github.com/rollup/rollup/releases) - [Changelog](https://github.com/rollup/rollup/blob/master/CHANGELOG.md) - [Commits](https://github.com/rollup/rollup/compare/v2.79.1...v2.79.2) --- updated-dependencies: - dependency-name: rollup dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 662e233129..01d23458dd 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -20685,9 +20685,9 @@ } }, "node_modules/rollup": { - "version": "2.79.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.1.tgz", - "integrity": "sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==", + "version": "2.79.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.2.tgz", + "integrity": "sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ==", "dev": true, "bin": { "rollup": "dist/bin/rollup" From c3cc25f55243bbe4e6b84c4e1617dd473695251b Mon Sep 17 00:00:00 2001 From: Konrad Date: Tue, 26 Nov 2024 12:15:01 +0100 Subject: [PATCH 0971/1397] document scraping change introduced in 2.52.0 (#15148) * CHANGELOG - scraping change introduced in 2.52.0 Change introduced in #12933; old behavior partially recoved in #14685 Signed-off-by: Konrad --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f9050cc96..1b36d3c384 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -115,7 +115,7 @@ request accessed a block on disk at about the same time as TSDB created a new bl ## 2.54.1 / 2024-08-27 -* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685 +* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps (mixing samples of the same series with and without timestamps is still rejected). #14685 * [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. 
#14654 * [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538 * [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514 @@ -197,6 +197,7 @@ This release changes the default for GOGC, the Go runtime control for the trade- ## 2.52.0 / 2024-05-07 * [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633 +* [CHANGE] Scrape: Multiple samples (even with different timestamps) are treated as duplicates during one scrape. * [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554 * [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935 * [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099 From d535d501d1e1bdb186eb0b5061cd445ef1ac9e7f Mon Sep 17 00:00:00 2001 From: DC <413331538@qq.com> Date: Tue, 26 Nov 2024 19:48:17 +0800 Subject: [PATCH 0972/1397] [DOCS] Improve description of WAL record format (#14936) Signed-off-by: DC <413331538@qq.com> --- tsdb/docs/format/wal.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tsdb/docs/format/wal.md b/tsdb/docs/format/wal.md index 39be2a808c..db1ce97a8b 100644 --- a/tsdb/docs/format/wal.md +++ b/tsdb/docs/format/wal.md @@ -17,7 +17,15 @@ Notable deviations are that the record fragment is encoded as: └───────────┴──────────┴────────────┴──────────────┘ ``` -The type flag has the following states: +The initial type byte is made up of three components: a 3-bit reserved field, a 1-bit zstd compression flag, a 1-bit snappy compression flag, and a 3-bit type flag. 
+ +``` +┌─────────────────┬──────────────────┬────────────────────┬──────────────────┐ +│ reserved <3bit> │ zstd_flag <1bit> │ snappy_flag <1bit> │ type_flag <3bit> │ +└─────────────────┴──────────────────┴────────────────────┴──────────────────┘ +``` + +The lowest 3 bits within this flag represent the record type as follows: * `0`: rest of page will be empty * `1`: a full record encoded in a single fragment From eb9ce70c3e7fcaf6b748168110c0527804998d2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 26 Nov 2024 13:56:22 +0100 Subject: [PATCH 0973/1397] Set hasCount after setting count to be consistent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- util/convertnhcb/convertnhcb.go | 1 + 1 file changed, 1 insertion(+) diff --git a/util/convertnhcb/convertnhcb.go b/util/convertnhcb/convertnhcb.go index b3dc851daa..21ae62b3cb 100644 --- a/util/convertnhcb/convertnhcb.go +++ b/util/convertnhcb/convertnhcb.go @@ -142,6 +142,7 @@ func (h TempHistogram) Convert() (*histogram.Histogram, *histogram.FloatHistogra if !h.hasCount && len(h.buckets) > 0 { // No count, so set count to the highest known bucket's count. h.count = h.buckets[len(h.buckets)-1].count + h.hasCount = true } if len(h.buckets) == 0 || h.buckets[len(h.buckets)-1].le != math.Inf(1) { From e01c5cefac73db460a5c8fec53020a42a9b6bfbe Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 20 Nov 2024 12:58:03 +0100 Subject: [PATCH 0974/1397] notifier: fix increment of metric prometheus_notifications_errors_total Previously, prometheus_notifications_errors_total was incremented by one whenever a batch of alerts was affected by an error during sending to a specific alertmanager. However, the corresponding metric prometheus_notifications_sent_total, counting all alerts that were sent (including those where the sent ended in error), is incremented by the batch size, i.e. the number of alerts. 
Therefore, the ratio used in the mixin for the PrometheusErrorSendingAlertsToSomeAlertmanagers alert is inconsistent. This commit changes the increment of prometheus_notifications_errors_total to the number of alerts that were sent in the attempt that ended in an error. It also adjusts the metrics help string accordingly and makes the wording in the alert in the mixin more precise. Signed-off-by: beorn7 --- CHANGELOG.md | 1 + documentation/prometheus-mixin/alerts.libsonnet | 4 ++-- notifier/notifier.go | 8 ++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f9050cc96..41d2f9b928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## unreleased +* [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428 * [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416 ## 3.0.0 / 2024-11-14 diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index 563daab801..9a6de90d82 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -84,8 +84,8 @@ severity: 'warning', }, annotations: { - summary: 'Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.', - description: '{{ printf "%%.1f" $value }}%% errors while sending alerts from Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}}.' % $._config, + summary: 'More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors.', + description: '{{ printf "%%.1f" $value }}%% of alerts sent by Prometheus %(prometheusName)s to Alertmanager {{$labels.alertmanager}} were affected by errors.' 
% $._config, }, }, { diff --git a/notifier/notifier.go b/notifier/notifier.go index 09a2005a36..956fd4652a 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -160,7 +160,7 @@ func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanag Namespace: namespace, Subsystem: subsystem, Name: "errors_total", - Help: "Total number of errors sending alert notifications.", + Help: "Total number of sent alerts affected by errors.", }, []string{alertmanagerLabel}, ), @@ -619,13 +619,13 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err) - n.metrics.errors.WithLabelValues(url).Inc() + n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err) + n.metrics.errors.WithLabelValues(url).Add(float64(count)) } else { numSuccess.Inc() } n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) - n.metrics.sent.WithLabelValues(url).Add(float64(len(amAlerts))) + n.metrics.sent.WithLabelValues(url).Add(float64(count)) wg.Done() }(ctx, ams.client, am.url().String(), payload, len(amAlerts)) From 9aa6e041d3b6cc9b7041c93d1bf34b3b11d2294e Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 26 Nov 2024 16:03:45 +0100 Subject: [PATCH 0975/1397] MemPostings: allocate ListPostings once in PFALV (#15465) Same as #15427 but for the new method added in #14144 Instead of allocating each ListPostings one by one, allocate them all in one go. 
Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index f211bade5f..a0e7d035ad 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -454,10 +454,14 @@ func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string e := p.m[name] its := make([]Postings, 0, len(e)) + lps := make([]ListPostings, len(e)) + i := 0 for _, refs := range e { if len(refs) > 0 { - its = append(its, NewListPostings(refs)) + lps[i] = ListPostings{list: refs} + its = append(its, &lps[i]) } + i++ } // Let the mutex go before merging. From 6f1f0aa6789418b01a3e920b196d5eaf3ea23324 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 26 Nov 2024 17:12:15 +0100 Subject: [PATCH 0976/1397] Revert "chore(deps): bump github.com/fsnotify/fsnotify from 1.7.0 to 1.8.0 (#15300)" (#15466) This reverts commit e243ed66b9638fe78d334b7e3fb9fc41863fb3cd. --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1a8f4c36f3..5f45dd2327 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/envoyproxy/go-control-plane v0.13.1 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/fsnotify/fsnotify v1.8.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-openapi/strfmt v0.23.0 github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf v1.3.2 diff --git a/go.sum b/go.sum index 80e4532aac..7b4c18f782 100644 --- a/go.sum +++ b/go.sum @@ -126,8 +126,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= From fd4797d756ef13d11617c1760863747d8813a939 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 15 Nov 2024 17:47:31 +0000 Subject: [PATCH 0977/1397] [PERF] RW: Remove interning hook It does nothing for standard Prometheus builds with -tags stringlabels, but the check to see if labels are being replaced has a cost. Signed-off-by: Bryan Boreham --- storage/remote/queue_manager.go | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 9f27c333a6..f6d6cbc7e9 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -957,13 +957,6 @@ func (t *QueueManager) Stop() { if t.mcfg.Send { t.metadataWatcher.Stop() } - - // On shutdown, release the strings in the labels from the intern pool. - t.seriesMtx.Lock() - for _, labels := range t.seriesLabels { - t.releaseLabels(labels) - } - t.seriesMtx.Unlock() t.metrics.unregister() } @@ -985,14 +978,6 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) { continue } lbls := t.builder.Labels() - t.internLabels(lbls) - - // We should not ever be replacing a series labels in the map, but just - // in case we do we need to ensure we do not leak the replaced interned - // strings. 
- if orig, ok := t.seriesLabels[s.Ref]; ok { - t.releaseLabels(orig) - } t.seriesLabels[s.Ref] = lbls } } @@ -1037,7 +1022,6 @@ func (t *QueueManager) SeriesReset(index int) { for k, v := range t.seriesSegmentIndexes { if v < index { delete(t.seriesSegmentIndexes, k) - t.releaseLabels(t.seriesLabels[k]) delete(t.seriesLabels, k) delete(t.seriesMetadata, k) delete(t.droppedSeries, k) @@ -1059,14 +1043,6 @@ func (t *QueueManager) client() WriteClient { return t.storeClient } -func (t *QueueManager) internLabels(lbls labels.Labels) { - lbls.InternStrings(t.interner.intern) -} - -func (t *QueueManager) releaseLabels(ls labels.Labels) { - ls.ReleaseStrings(t.interner.release) -} - // processExternalLabels merges externalLabels into b. If b contains // a label in externalLabels, the value in b wins. func processExternalLabels(b *labels.Builder, externalLabels []labels.Label) { From c8f0e9d9c963a504278877f2be00b20c35246850 Mon Sep 17 00:00:00 2001 From: Chris Gray Date: Tue, 26 Nov 2024 09:05:39 -0800 Subject: [PATCH 0978/1397] Fix typo Signed-off-by: Chris Gray --- docs/migration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/migration.md b/docs/migration.md index 73de5bcaaf..e2d53472f3 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -112,7 +112,7 @@ may now fail if this fallback protocol is not specified. The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes to the index format. Consequently, a Prometheus v3 TSDB can only be read by a Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will be only -able to downgrade to v2.55, not lower, without loosing your TSDB persitent data. +able to downgrade to v2.55, not lower, without losing your TSDB persitent data. As an extra safety measure, you could optionally consider upgrading to v2.55 first and confirm Prometheus works as expected, before upgrading to v3. 
From e98c19c1cef9df1db42a2b830e8d1d579a44890d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sun, 24 Nov 2024 15:59:44 +0000 Subject: [PATCH 0979/1397] [PERF] TSDB: Cache all symbols for compaction Trade a bit more memory for a lot less CPU spent looking up symbols. Signed-off-by: Bryan Boreham --- tsdb/index/index.go | 52 +++++++++++++++------------------------------ 1 file changed, 17 insertions(+), 35 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 2708b07b14..69ecd59b3c 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -110,12 +110,6 @@ func newCRC32() hash.Hash32 { return crc32.New(castagnoliTable) } -type symbolCacheEntry struct { - index uint32 - lastValueIndex uint32 - lastValue string -} - type PostingsEncoder func(*encoding.Encbuf, []uint32) error type PostingsDecoder func(encoding.Decbuf) (int, Postings, error) @@ -146,7 +140,7 @@ type Writer struct { symbols *Symbols symbolFile *fileutil.MmapFile lastSymbol string - symbolCache map[string]symbolCacheEntry + symbolCache map[string]uint32 // From symbol to index in table. labelIndexes []labelIndexHashEntry // Label index offsets. labelNames map[string]uint64 // Label names, and their usage. @@ -246,7 +240,7 @@ func NewWriterWithEncoder(ctx context.Context, fn string, encoder PostingsEncode buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, - symbolCache: make(map[string]symbolCacheEntry, 1<<8), + symbolCache: make(map[string]uint32, 1<<16), labelNames: make(map[string]uint64, 1<<8), crc32: newCRC32(), postingsEncoder: encoder, @@ -478,29 +472,16 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... 
w.buf2.PutUvarint(lset.Len()) if err := lset.Validate(func(l labels.Label) error { - var err error - cacheEntry, ok := w.symbolCache[l.Name] - nameIndex := cacheEntry.index + nameIndex, ok := w.symbolCache[l.Name] if !ok { - nameIndex, err = w.symbols.ReverseLookup(l.Name) - if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %w", l.Name, err) - } + return fmt.Errorf("symbol entry for %q does not exist", l.Name) } w.labelNames[l.Name]++ w.buf2.PutUvarint32(nameIndex) - valueIndex := cacheEntry.lastValueIndex - if !ok || cacheEntry.lastValue != l.Value { - valueIndex, err = w.symbols.ReverseLookup(l.Value) - if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %w", l.Value, err) - } - w.symbolCache[l.Name] = symbolCacheEntry{ - index: nameIndex, - lastValueIndex: valueIndex, - lastValue: l.Value, - } + valueIndex, ok := w.symbolCache[l.Value] + if !ok { + return fmt.Errorf("symbol entry for %q does not exist", l.Value) } w.buf2.PutUvarint32(valueIndex) return nil @@ -559,6 +540,7 @@ func (w *Writer) AddSymbol(sym string) error { return fmt.Errorf("symbol %q out-of-order", sym) } w.lastSymbol = sym + w.symbolCache[sym] = uint32(w.numSymbols) w.numSymbols++ w.buf1.Reset() w.buf1.PutUvarintStr(sym) @@ -644,9 +626,9 @@ func (w *Writer) writeLabelIndices() error { values = values[:0] } current = name - sid, err := w.symbols.ReverseLookup(value) - if err != nil { - return err + sid, ok := w.symbolCache[value] + if !ok { + return fmt.Errorf("symbol entry for %q does not exist", string(value)) } values = append(values, sid) } @@ -918,9 +900,9 @@ func (w *Writer) writePostingsToTmpFiles() error { nameSymbols := map[uint32]string{} for _, name := range batchNames { - sid, err := w.symbols.ReverseLookup(name) - if err != nil { - return err + sid, ok := w.symbolCache[name] + if !ok { + return fmt.Errorf("symbol entry for %q does not exist", name) } nameSymbols[sid] = name } @@ -957,9 +939,9 @@ func (w *Writer) writePostingsToTmpFiles() 
error { for _, name := range batchNames { // Write out postings for this label name. - sid, err := w.symbols.ReverseLookup(name) - if err != nil { - return err + sid, ok := w.symbolCache[name] + if !ok { + return fmt.Errorf("symbol entry for %q does not exist", name) } values := make([]uint32, 0, len(postings[sid])) for v := range postings[sid] { From ca3119bd24c451851c15e47642e85b449552d85c Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 25 Nov 2024 16:51:55 +0000 Subject: [PATCH 0980/1397] TSDB: eliminate one yolostring When the only use of a []byte->string conversion is as a map key, Go doesn't allocate. Signed-off-by: Bryan Boreham --- tsdb/index/index.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 69ecd59b3c..911b1a6ecc 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -610,10 +610,10 @@ func (w *Writer) writeLabelIndices() error { values := []uint32{} for d.Err() == nil && cnt > 0 { cnt-- - d.Uvarint() // Keycount. - name := d.UvarintBytes() // Label name. - value := yoloString(d.UvarintBytes()) // Label value. - d.Uvarint64() // Offset. + d.Uvarint() // Keycount. + name := d.UvarintBytes() // Label name. + value := d.UvarintBytes() // Label value. + d.Uvarint64() // Offset. if len(name) == 0 { continue // All index is ignored. 
} @@ -626,7 +626,7 @@ func (w *Writer) writeLabelIndices() error { values = values[:0] } current = name - sid, ok := w.symbolCache[value] + sid, ok := w.symbolCache[string(value)] if !ok { return fmt.Errorf("symbol entry for %q does not exist", string(value)) } From 38bb6ece254024d3b490056e92aa7b777c8b845f Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Tue, 26 Nov 2024 23:42:36 +0530 Subject: [PATCH 0981/1397] [BUGFIX] PromQL: Fix behaviour of some aggregations with histograms (#15432) promql: fix some aggregations for histograms This PR fixes the behaviour of `topk`,`bottomk`, `limitk` and `limit_ratio` with histograms. The fixed behaviour are as follows: - For `topk` and `bottomk` histograms are ignored and add info annotations added. - For `limitk` and `limit_ratio` histograms are included in the results(if applicable). Signed-off-by: Neeraj Gartia --- promql/engine.go | 53 ++++++++-- promql/promqltest/testdata/aggregators.test | 43 ++++++++ promql/promqltest/testdata/limit.test | 111 +++++++++++++------- 3 files changed, 161 insertions(+), 46 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index fc8256dd79..88fa5e8141 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3187,18 +3187,19 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inp seriesLoop: for si := range inputMatrix { - f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) + f, h, ok := ev.nextValues(enh.Ts, &inputMatrix[si]) if !ok { continue } - s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName} + s = Sample{Metric: inputMatrix[si].Metric, F: f, H: h, DropName: inputMatrix[si].DropName} group := &groups[seriesToResult[si]] // Initialize this group if it's the first time we've seen it. if !group.seen { // LIMIT_RATIO is a special case, as we may not add this very sample to the heap, // while we also don't know the final size of it. 
- if op == parser.LIMIT_RATIO { + switch op { + case parser.LIMIT_RATIO: *group = groupedAggregation{ seen: true, heap: make(vectorByValueHeap, 0), @@ -3206,12 +3207,34 @@ seriesLoop: if ratiosampler.AddRatioSample(r, &s) { heap.Push(&group.heap, &s) } - } else { + case parser.LIMITK: *group = groupedAggregation{ seen: true, heap: make(vectorByValueHeap, 1, k), } group.heap[0] = s + case parser.TOPK: + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0, k), + } + if s.H != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange)) + } else { + heap.Push(&group.heap, &s) + } + case parser.BOTTOMK: + *group = groupedAggregation{ + seen: true, + heap: make(vectorByValueHeap, 0, k), + } + if s.H != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange)) + } else { + heap.Push(&group.heap, &s) + } } continue } @@ -3220,6 +3243,9 @@ seriesLoop: case parser.TOPK: // We build a heap of up to k elements, with the smallest element at heap[0]. switch { + case s.H != nil: + // Ignore histogram sample and add info annotation. + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange)) case len(group.heap) < k: heap.Push(&group.heap, &s) case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): @@ -3233,6 +3259,9 @@ seriesLoop: case parser.BOTTOMK: // We build a heap of up to k elements, with the biggest element at heap[0]. switch { + case s.H != nil: + // Ignore histogram sample and add info annotation. 
+ annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange)) case len(group.heap) < k: heap.Push((*vectorByReverseValueHeap)(&group.heap), &s) case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): @@ -3275,10 +3304,14 @@ seriesLoop: mat = make(Matrix, 0, len(groups)) } - add := func(lbls labels.Labels, f float64, dropName bool) { + add := func(lbls labels.Labels, f float64, h *histogram.FloatHistogram, dropName bool) { // If this could be an instant query, add directly to the matrix so the result is in consistent order. if ev.endTimestamp == ev.startTimestamp { - mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName}) + if h != nil { + mat = append(mat, Series{Metric: lbls, Histograms: []HPoint{{T: enh.Ts, H: h}}, DropName: dropName}) + } else { + mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName}) + } } else { // Otherwise the results are added into seriess elements. 
hash := lbls.Hash() @@ -3286,7 +3319,7 @@ seriesLoop: if !ok { ss = Series{Metric: lbls, DropName: dropName} } - addToSeries(&ss, enh.Ts, f, nil, numSteps) + addToSeries(&ss, enh.Ts, f, h, numSteps) seriess[hash] = ss } } @@ -3301,7 +3334,7 @@ seriesLoop: sort.Sort(sort.Reverse(aggr.heap)) } for _, v := range aggr.heap { - add(v.Metric, v.F, v.DropName) + add(v.Metric, v.F, v.H, v.DropName) } case parser.BOTTOMK: @@ -3310,12 +3343,12 @@ seriesLoop: sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap))) } for _, v := range aggr.heap { - add(v.Metric, v.F, v.DropName) + add(v.Metric, v.F, v.H, v.DropName) } case parser.LIMITK, parser.LIMIT_RATIO: for _, v := range aggr.heap { - add(v.Metric, v.F, v.DropName) + add(v.Metric, v.F, v.H, v.DropName) } } } diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 00f393a865..244e69273b 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -272,6 +272,8 @@ load 5m http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_histogram{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:10}}x11 + http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11 foo 3+0x10 eval_ordered instant at 50m topk(3, http_requests) @@ -338,6 +340,47 @@ eval_ordered instant at 50m topk(scalar(foo), http_requests) http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 +# Tests for histogram: should ignore histograms. 
+eval_info instant at 50m topk(100, http_requests_histogram) + #empty + +eval_info range from 0 to 50m step 5m topk(100, http_requests_histogram) + #empty + +eval_info instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"}) + {__name__="http_requests", group="canary", instance="1", job="app-server"} 800 + +eval_info instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) + {} 9 + +eval_info range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"})) + {} 9x10 + +eval_info instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) + {__name__="http_requests", group="canary", instance="0", job="app-server"} 700 + {__name__="http_requests", group="canary", instance="1", job="app-server"} 800 + {__name__="http_requests", group="production", instance="2", job="api-server"} NaN + +eval_info instant at 50m bottomk(100, http_requests_histogram) + #empty + +eval_info range from 0 to 50m step 5m bottomk(100, http_requests_histogram) + #empty + +eval_info instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"}) + {__name__="http_requests", group="production", instance="0", job="api-server"} 100 + +eval_info instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) + {} 9 + +eval_info range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"})) + {} 9x10 + +eval_info instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"}) + {__name__="http_requests", group="production", instance="0", job="api-server"} 100 + {__name__="http_requests", group="production", instance="1", job="api-server"} 200 + {__name__="http_requests", group="production", instance="2", job="api-server"} NaN + clear # Tests for count_values. 
diff --git a/promql/promqltest/testdata/limit.test b/promql/promqltest/testdata/limit.test index 0ab363f9ae..e6dd007af4 100644 --- a/promql/promqltest/testdata/limit.test +++ b/promql/promqltest/testdata/limit.test @@ -9,6 +9,8 @@ load 5m http_requests{job="api-server", instance="1", group="canary"} 0+40x10 http_requests{job="api-server", instance="2", group="canary"} 0+50x10 http_requests{job="api-server", instance="3", group="canary"} 0+60x10 + http_requests{job="api-server", instance="histogram_1", group="canary"} {{schema:0 sum:10 count:10}}x11 + http_requests{job="api-server", instance="histogram_2", group="canary"} {{schema:0 sum:20 count:20}}x11 eval instant at 50m count(limitk by (group) (0, http_requests)) # empty @@ -16,25 +18,56 @@ eval instant at 50m count(limitk by (group) (0, http_requests)) eval instant at 50m count(limitk by (group) (-1, http_requests)) # empty -# Exercise k==1 special case (as sample is added before the main series loop +# Exercise k==1 special case (as sample is added before the main series loop). eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests) - {} 2 + {} 2 eval instant at 50m count(limitk by (group) (2, http_requests) and http_requests) - {} 4 + {} 4 eval instant at 50m count(limitk(100, http_requests) and http_requests) - {} 6 + {} 8 -# Exercise k==1 special case (as sample is added before the main series loop +# Exercise k==1 special case (as sample is added before the main series loop). eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests) - {} 2 + {} 2 eval instant at 50m count(limitk by (group) (2, http_requests) and http_requests) - {} 4 + {} 4 eval instant at 50m count(limitk(100, http_requests) and http_requests) - {} 6 + {} 8 + +# Test for histograms. +# k==1: verify that histogram is included in the result. 
+eval instant at 50m limitk(1, http_requests{instance="histogram_1"}) + {__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}} + +eval range from 0 to 50m step 5m limitk(1, http_requests{instance="histogram_1"}) + {__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}}x10 + +# Histogram is included with mix of floats as well. +eval instant at 50m limitk(8, http_requests{instance=~"(histogram_2|0)"}) + {__name__="http_requests", group="canary", instance="histogram_2", job="api-server"} {{count:20 sum:20}} + {__name__="http_requests", group="production", instance="0", job="api-server"} 100 + {__name__="http_requests", group="canary", instance="0", job="api-server"} 300 + +eval range from 0 to 50m step 5m limitk(8, http_requests{instance=~"(histogram_2|0)"}) + {__name__="http_requests", group="canary", instance="histogram_2", job="api-server"} {{count:20 sum:20}}x10 + {__name__="http_requests", group="production", instance="0", job="api-server"} 0+10x10 + {__name__="http_requests", group="canary", instance="0", job="api-server"} 0+30x10 + +eval instant at 50m count(limitk(2, http_requests{instance=~"histogram_[0-9]"})) + {} 2 + +eval range from 0 to 50m step 5m count(limitk(2, http_requests{instance=~"histogram_[0-9]"})) + {} 2+0x10 + +eval instant at 50m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"})) + {} 2 + +eval range from 0 to 50m step 5m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"})) + {} 2+0x10 # limit_ratio eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests)) @@ -42,9 +75,9 @@ eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests)) # limitk(2, ...) should always return a 2-count subset of the timeseries (hence the AND'ing) eval range from 0 to 50m step 5m count(limitk(2, http_requests) and http_requests) - {} 2+0x10 + {} 2+0x10 -# Tests for limit_ratio +# Tests for limit_ratio. 
# # NB: below 0.5 ratio will depend on some hashing "luck" (also there's no guarantee that # an integer comes from: total number of series * ratio), as it depends on: @@ -56,50 +89,50 @@ eval range from 0 to 50m step 5m count(limitk(2, http_requests) and http_request # # See `AddRatioSample()` in promql/engine.go for more details. -# Half~ish samples: verify we get "near" 3 (of 0.5 * 6) -eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) <= bool (3+1) - {} 1+0x10 +# Half~ish samples: verify we get "near" 3 (of 0.5 * 6). +eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) <= bool (4+1) + {} 1+0x10 -eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) >= bool (3-1) - {} 1+0x10 +eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) >= bool (4-1) + {} 1+0x10 -# All samples +# All samples. eval range from 0 to 50m step 5m count(limit_ratio(1.0, http_requests) and http_requests) - {} 6+0x10 + {} 8+0x10 -# All samples +# All samples. eval range from 0 to 50m step 5m count(limit_ratio(-1.0, http_requests) and http_requests) - {} 6+0x10 + {} 8+0x10 -# Capped to 1.0 -> all samples +# Capped to 1.0 -> all samples. eval_warn range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests) - {} 6+0x10 + {} 8+0x10 -# Capped to -1.0 -> all samples +# Capped to -1.0 -> all samples. eval_warn range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests) - {} 6+0x10 + {} 8+0x10 -# Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other -# Complement below for [0.2, -0.8] +# Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other. +# Complement below for [0.2, -0.8]. # -# Complement 1of2: `or` should return all samples +# Complement 1of2: `or` should return all samples. 
eval range from 0 to 50m step 5m count(limit_ratio(0.2, http_requests) or limit_ratio(-0.8, http_requests)) - {} 6+0x10 + {} 8+0x10 -# Complement 2of2: `and` should return no samples +# Complement 2of2: `and` should return no samples. eval range from 0 to 50m step 5m count(limit_ratio(0.2, http_requests) and limit_ratio(-0.8, http_requests)) # empty -# Complement below for [0.5, -0.5] +# Complement below for [0.5, -0.5]. eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) or limit_ratio(-0.5, http_requests)) - {} 6+0x10 + {} 8+0x10 eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and limit_ratio(-0.5, http_requests)) # empty -# Complement below for [0.8, -0.2] +# Complement below for [0.8, -0.2]. eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) or limit_ratio(-0.2, http_requests)) - {} 6+0x10 + {} 8+0x10 eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit_ratio(-0.2, http_requests)) # empty @@ -107,13 +140,19 @@ eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit # Complement below for [some_ratio, 1.0 - some_ratio], some_ratio derived from time(), # using a small prime number to avoid rounded ratio values, and a small set of them. eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio(1.0 - (time() % 17/17), http_requests)) - {} 6+0x10 + {} 8+0x10 eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio(1.0 - (time() % 17/17), http_requests)) # empty -# Poor man's normality check: ok (loaded samples follow a nice linearity over labels and time) -# The check giving: 1 (i.e. true) -eval range from 0 to 50m step 5m abs(avg(limit_ratio(0.5, http_requests)) - avg(limit_ratio(-0.5, http_requests))) <= bool stddev(http_requests) +# Poor man's normality check: ok (loaded samples follow a nice linearity over labels and time). +# The check giving: 1 (i.e. true). 
+eval range from 0 to 50m step 5m abs(avg(limit_ratio(0.5, http_requests{instance!~"histogram_[0-9]"})) - avg(limit_ratio(-0.5, http_requests{instance!~"histogram_[0-9]"}))) <= bool stddev(http_requests{instance!~"histogram_[0-9]"}) {} 1+0x10 +# All specified histograms are included for r=1. +eval instant at 50m limit_ratio(1, http_requests{instance="histogram_1"}) + {__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}} + +eval range from 0 to 50m step 5m limit_ratio(1, http_requests{instance="histogram_1"}) + {__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}}x10 From 9ad93ba8df6de2a9ad752e2e53a1bdbf99f34db4 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 27 Nov 2024 12:33:20 +0100 Subject: [PATCH 0982/1397] Optimize l=~".+" matcher (#15474) Since dot is matching newline now, `l=~".+"` is "any non empty label value", and #14144 added a specific method in the index for that so we don't need to run the matcher on each one of the label values. Signed-off-by: Oleg Zaytsev --- tsdb/querier.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index be2dea2de4..34e9a81231 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -256,10 +256,23 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc // We already handled the case at the top of the function, // and it is unexpected to get all postings again here. return nil, errors.New("unexpected all postings") + case m.Type == labels.MatchRegexp && m.Value == ".*": // .* regexp matches any string: do nothing. case m.Type == labels.MatchNotRegexp && m.Value == ".*": return index.EmptyPostings(), nil + + case m.Type == labels.MatchRegexp && m.Value == ".+": + // .+ regexp matches any non-empty string: get postings for all label values. 
+ it := ix.PostingsForAllLabelValues(ctx, m.Name) + if index.IsEmptyPostingsType(it) { + return index.EmptyPostings(), nil + } + its = append(its, it) + case m.Type == labels.MatchNotRegexp && m.Value == ".+": + // .+ regexp matches any non-empty string: get postings for all label values and remove them. + its = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name)) + case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") @@ -294,7 +307,7 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc return index.EmptyPostings(), nil } its = append(its, it) - default: // l="a" + default: // l="a", l=~"a|b", l=~"a.b", etc. // Non-Not matcher, use normal postingsForMatcher. it, err := postingsForMatcher(ctx, ix, m) if err != nil { From a44c449155038bc5d42d6337480c597b85e4975e Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 27 Nov 2024 13:08:34 +0100 Subject: [PATCH 0983/1397] promql: Add NaN tests for avg and avg_over_time Signed-off-by: beorn7 --- promql/promqltest/testdata/aggregators.test | 5 +++++ promql/promqltest/testdata/functions.test | 14 ++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 244e69273b..7479d2c3d3 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -543,6 +543,7 @@ load 10s data{test="bigzero",point="b"} -9.988465674311579e+307 data{test="bigzero",point="c"} 9.988465674311579e+307 data{test="bigzero",point="d"} 9.988465674311579e+307 + data{test="value is nan"} NaN eval instant at 1m avg(data{test="ten"}) {} 10 @@ -577,6 +578,10 @@ eval instant at 1m avg(data{test="-big"}) eval instant at 1m avg(data{test="bigzero"}) {} 0 +# If NaN is in the mix, the result is NaN. +eval instant at 1m avg(data) + {} NaN + # Test summing and averaging extreme values. 
clear diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index e4caf0e363..1c832c89de 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -713,6 +713,7 @@ load 10s metric8 9.988465674311579e+307 9.988465674311579e+307 metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 metric10 -9.988465674311579e+307 9.988465674311579e+307 + metric11 1 2 3 NaN NaN eval instant at 55s avg_over_time(metric[1m]) {} 3 @@ -806,6 +807,19 @@ eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m]) eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m]) {} 0 +# NaN behavior. +eval instant at 20s avg_over_time(metric11[1m]) + {} 2 + +eval instant at 30s avg_over_time(metric11[1m]) + {} NaN + +eval instant at 1m avg_over_time(metric11[1m]) + {} NaN + +eval instant at 1m sum_over_time(metric11[1m])/count_over_time(metric11[1m]) + {} NaN + # Test if very big intermediate values cause loss of detail. clear load 10s From 8e3301eb44b2b94dac152dd55c08751f62f6bd41 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 27 Nov 2024 23:20:23 +1100 Subject: [PATCH 0984/1397] Export quantile functions (#15190) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Export quantile functions For use in Mimir's query engine, it would be helpful if these functions were exported. 
Co-authored-by: Björn Rabenstein Signed-off-by: Joshua Hesketh --------- Signed-off-by: Joshua Hesketh Signed-off-by: Joshua Hesketh Co-authored-by: Björn Rabenstein --- promql/functions.go | 8 +- promql/quantile.go | 90 +++++++------ promql/quantile_test.go | 276 ++++++++++++++++++++-------------------- 3 files changed, 196 insertions(+), 178 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 4be743040d..cadac19b4b 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1300,7 +1300,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev } enh.Out = append(enh.Out, Sample{ Metric: sample.Metric, - F: histogramFraction(lower, upper, sample.H), + F: HistogramFraction(lower, upper, sample.H), DropName: true, }) } @@ -1352,7 +1352,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } - mb.buckets = append(mb.buckets, bucket{upperBound, sample.F}) + mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) } // Now deal with the histograms. @@ -1374,14 +1374,14 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev } enh.Out = append(enh.Out, Sample{ Metric: sample.Metric, - F: histogramQuantile(q, sample.H), + F: HistogramQuantile(q, sample.H), DropName: true, }) } for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { - res, forcedMonotonicity, _ := bucketQuantile(q, mb.buckets) + res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets) enh.Out = append(enh.Out, Sample{ Metric: mb.metric, F: res, diff --git a/promql/quantile.go b/promql/quantile.go index 06775d3ae6..a6851810c5 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -51,20 +51,22 @@ var excludedLabels = []string{ labels.BucketLabel, } -type bucket struct { - upperBound float64 - count float64 +// Bucket represents a bucket of a classic histogram. 
It is used internally by the promql +// package, but it is nevertheless exported for potential use in other PromQL engines. +type Bucket struct { + UpperBound float64 + Count float64 } -// buckets implements sort.Interface. -type buckets []bucket +// Buckets implements sort.Interface. +type Buckets []Bucket type metricWithBuckets struct { metric labels.Labels - buckets buckets + buckets Buckets } -// bucketQuantile calculates the quantile 'q' based on the given buckets. The +// BucketQuantile calculates the quantile 'q' based on the given buckets. The // buckets will be sorted by upperBound by this function (i.e. no sorting // needed before calling this function). The quantile value is interpolated // assuming a linear distribution within a bucket. However, if the quantile @@ -95,7 +97,14 @@ type metricWithBuckets struct { // and another bool to indicate if small differences between buckets (that // are likely artifacts of floating point precision issues) have been // ignored. -func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { +// +// Generically speaking, BucketQuantile is for calculating the +// histogram_quantile() of classic histograms. See also: HistogramQuantile +// for native histograms. +// +// BucketQuantile is exported as a useful quantile function over a set of +// given buckets. It may be used by other PromQL engine implementations. +func BucketQuantile(q float64, buckets Buckets) (float64, bool, bool) { if math.IsNaN(q) { return math.NaN(), false, false } @@ -105,17 +114,17 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { if q > 1 { return math.Inf(+1), false, false } - slices.SortFunc(buckets, func(a, b bucket) int { + slices.SortFunc(buckets, func(a, b Bucket) int { // We don't expect the bucket boundary to be a NaN. 
- if a.upperBound < b.upperBound { + if a.UpperBound < b.UpperBound { return -1 } - if a.upperBound > b.upperBound { + if a.UpperBound > b.UpperBound { return +1 } return 0 }) - if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) { + if !math.IsInf(buckets[len(buckets)-1].UpperBound, +1) { return math.NaN(), false, false } @@ -125,33 +134,33 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { if len(buckets) < 2 { return math.NaN(), false, false } - observations := buckets[len(buckets)-1].count + observations := buckets[len(buckets)-1].Count if observations == 0 { return math.NaN(), false, false } rank := q * observations - b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank }) + b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].Count >= rank }) if b == len(buckets)-1 { - return buckets[len(buckets)-2].upperBound, forcedMonotonic, fixedPrecision + return buckets[len(buckets)-2].UpperBound, forcedMonotonic, fixedPrecision } - if b == 0 && buckets[0].upperBound <= 0 { - return buckets[0].upperBound, forcedMonotonic, fixedPrecision + if b == 0 && buckets[0].UpperBound <= 0 { + return buckets[0].UpperBound, forcedMonotonic, fixedPrecision } var ( bucketStart float64 - bucketEnd = buckets[b].upperBound - count = buckets[b].count + bucketEnd = buckets[b].UpperBound + count = buckets[b].Count ) if b > 0 { - bucketStart = buckets[b-1].upperBound - count -= buckets[b-1].count - rank -= buckets[b-1].count + bucketStart = buckets[b-1].UpperBound + count -= buckets[b-1].Count + rank -= buckets[b-1].Count } return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic, fixedPrecision } -// histogramQuantile calculates the quantile 'q' based on the given histogram. +// HistogramQuantile calculates the quantile 'q' based on the given histogram. // // For custom buckets, the result is interpolated linearly, i.e. it is assumed // the observations are uniformly distributed within each bucket. 
(This is a @@ -186,7 +195,13 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { // If q>1, +Inf is returned. // // If q is NaN, NaN is returned. -func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { +// +// HistogramQuantile is for calculating the histogram_quantile() of native +// histograms. See also: BucketQuantile for classic histograms. +// +// HistogramQuantile is exported as it may be used by other PromQL engine +// implementations. +func HistogramQuantile(q float64, h *histogram.FloatHistogram) float64 { if q < 0 { return math.Inf(-1) } @@ -297,11 +312,11 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction)) } -// histogramFraction calculates the fraction of observations between the +// HistogramFraction calculates the fraction of observations between the // provided lower and upper bounds, based on the provided histogram. // -// histogramFraction is in a certain way the inverse of histogramQuantile. If -// histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h) +// HistogramFraction is in a certain way the inverse of histogramQuantile. If +// HistogramQuantile(0.9, h) returns 123.4, then HistogramFraction(-Inf, 123.4, h) // returns 0.9. // // The same notes with regard to interpolation and assumptions about the zero @@ -328,7 +343,10 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { // If lower or upper is NaN, NaN is returned. // // If lower >= upper and the histogram has at least 1 observation, zero is returned. -func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float64 { +// +// HistogramFraction is exported as it may be used by other PromQL engine +// implementations. 
+func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram) float64 { if h.Count == 0 || math.IsNaN(lower) || math.IsNaN(upper) { return math.NaN() } @@ -434,12 +452,12 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 // coalesceBuckets merges buckets with the same upper bound. // // The input buckets must be sorted. -func coalesceBuckets(buckets buckets) buckets { +func coalesceBuckets(buckets Buckets) Buckets { last := buckets[0] i := 0 for _, b := range buckets[1:] { - if b.upperBound == last.upperBound { - last.count += b.count + if b.UpperBound == last.UpperBound { + last.Count += b.Count } else { buckets[i] = last last = b @@ -476,11 +494,11 @@ func coalesceBuckets(buckets buckets) buckets { // // We return a bool to indicate if this monotonicity was forced or not, and // another bool to indicate if small deltas were ignored or not. -func ensureMonotonicAndIgnoreSmallDeltas(buckets buckets, tolerance float64) (bool, bool) { +func ensureMonotonicAndIgnoreSmallDeltas(buckets Buckets, tolerance float64) (bool, bool) { var forcedMonotonic, fixedPrecision bool - prev := buckets[0].count + prev := buckets[0].Count for i := 1; i < len(buckets); i++ { - curr := buckets[i].count // Assumed always positive. + curr := buckets[i].Count // Assumed always positive. if curr == prev { // No correction needed if the counts are identical between buckets. continue @@ -489,14 +507,14 @@ func ensureMonotonicAndIgnoreSmallDeltas(buckets buckets, tolerance float64) (bo // Silently correct numerically insignificant differences from floating // point precision errors, regardless of direction. // Do not update the 'prev' value as we are ignoring the difference. - buckets[i].count = prev + buckets[i].Count = prev fixedPrecision = true continue } if curr < prev { // Force monotonicity by removing any decreases regardless of magnitude. // Do not update the 'prev' value as we are ignoring the decrease. 
- buckets[i].count = prev + buckets[i].Count = prev forcedMonotonic = true continue } diff --git a/promql/quantile_test.go b/promql/quantile_test.go index 84f4c0b06d..a1047d73f4 100644 --- a/promql/quantile_test.go +++ b/promql/quantile_test.go @@ -24,29 +24,29 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { eps := 1e-12 for name, tc := range map[string]struct { - getInput func() buckets // The buckets can be modified in-place so return a new one each time. + getInput func() Buckets // The buckets can be modified in-place so return a new one each time. expectedForced bool expectedFixed bool expectedValues map[float64]float64 }{ "simple - monotonic": { - getInput: func() buckets { - return buckets{ + getInput: func() Buckets { + return Buckets{ { - upperBound: 10, - count: 10, + UpperBound: 10, + Count: 10, }, { - upperBound: 15, - count: 15, + UpperBound: 15, + Count: 15, }, { - upperBound: 20, - count: 15, + UpperBound: 20, + Count: 15, }, { - upperBound: 30, - count: 15, + UpperBound: 30, + Count: 15, }, { - upperBound: math.Inf(1), - count: 15, + UpperBound: math.Inf(1), + Count: 15, }, } }, @@ -60,23 +60,23 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { }, }, "simple - non-monotonic middle": { - getInput: func() buckets { - return buckets{ + getInput: func() Buckets { + return Buckets{ { - upperBound: 10, - count: 10, + UpperBound: 10, + Count: 10, }, { - upperBound: 15, - count: 15, + UpperBound: 15, + Count: 15, }, { - upperBound: 20, - count: 15.00000000001, // Simulate the case there's a small imprecision in float64. + UpperBound: 20, + Count: 15.00000000001, // Simulate the case there's a small imprecision in float64. 
}, { - upperBound: 30, - count: 15, + UpperBound: 30, + Count: 15, }, { - upperBound: math.Inf(1), - count: 15, + UpperBound: math.Inf(1), + Count: 15, }, } }, @@ -90,41 +90,41 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { }, }, "real example - monotonic": { - getInput: func() buckets { - return buckets{ + getInput: func() Buckets { + return Buckets{ { - upperBound: 1, - count: 6454661.3014166197, + UpperBound: 1, + Count: 6454661.3014166197, }, { - upperBound: 5, - count: 8339611.2001912938, + UpperBound: 5, + Count: 8339611.2001912938, }, { - upperBound: 10, - count: 14118319.2444762159, + UpperBound: 10, + Count: 14118319.2444762159, }, { - upperBound: 25, - count: 14130031.5272856522, + UpperBound: 25, + Count: 14130031.5272856522, }, { - upperBound: 50, - count: 46001270.3030008152, + UpperBound: 50, + Count: 46001270.3030008152, }, { - upperBound: 64, - count: 46008473.8585563600, + UpperBound: 64, + Count: 46008473.8585563600, }, { - upperBound: 80, - count: 46008473.8585563600, + UpperBound: 80, + Count: 46008473.8585563600, }, { - upperBound: 100, - count: 46008473.8585563600, + UpperBound: 100, + Count: 46008473.8585563600, }, { - upperBound: 250, - count: 46008473.8585563600, + UpperBound: 250, + Count: 46008473.8585563600, }, { - upperBound: 1000, - count: 46008473.8585563600, + UpperBound: 1000, + Count: 46008473.8585563600, }, { - upperBound: math.Inf(1), - count: 46008473.8585563600, + UpperBound: math.Inf(1), + Count: 46008473.8585563600, }, } }, @@ -138,41 +138,41 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { }, }, "real example - non-monotonic": { - getInput: func() buckets { - return buckets{ + getInput: func() Buckets { + return Buckets{ { - upperBound: 1, - count: 6454661.3014166225, + UpperBound: 1, + Count: 6454661.3014166225, }, { - upperBound: 5, - count: 8339611.2001912957, + UpperBound: 5, + Count: 8339611.2001912957, }, { - upperBound: 10, - count: 14118319.2444762159, + UpperBound: 10, + Count: 
14118319.2444762159, }, { - upperBound: 25, - count: 14130031.5272856504, + UpperBound: 25, + Count: 14130031.5272856504, }, { - upperBound: 50, - count: 46001270.3030008227, + UpperBound: 50, + Count: 46001270.3030008227, }, { - upperBound: 64, - count: 46008473.8585563824, + UpperBound: 64, + Count: 46008473.8585563824, }, { - upperBound: 80, - count: 46008473.8585563898, + UpperBound: 80, + Count: 46008473.8585563898, }, { - upperBound: 100, - count: 46008473.8585563824, + UpperBound: 100, + Count: 46008473.8585563824, }, { - upperBound: 250, - count: 46008473.8585563824, + UpperBound: 250, + Count: 46008473.8585563824, }, { - upperBound: 1000, - count: 46008473.8585563898, + UpperBound: 1000, + Count: 46008473.8585563898, }, { - upperBound: math.Inf(1), - count: 46008473.8585563824, + UpperBound: math.Inf(1), + Count: 46008473.8585563824, }, } }, @@ -186,53 +186,53 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { }, }, "real example 2 - monotonic": { - getInput: func() buckets { - return buckets{ + getInput: func() Buckets { + return Buckets{ { - upperBound: 0.005, - count: 9.6, + UpperBound: 0.005, + Count: 9.6, }, { - upperBound: 0.01, - count: 9.688888889, + UpperBound: 0.01, + Count: 9.688888889, }, { - upperBound: 0.025, - count: 9.755555556, + UpperBound: 0.025, + Count: 9.755555556, }, { - upperBound: 0.05, - count: 9.844444444, + UpperBound: 0.05, + Count: 9.844444444, }, { - upperBound: 0.1, - count: 9.888888889, + UpperBound: 0.1, + Count: 9.888888889, }, { - upperBound: 0.25, - count: 9.888888889, + UpperBound: 0.25, + Count: 9.888888889, }, { - upperBound: 0.5, - count: 9.888888889, + UpperBound: 0.5, + Count: 9.888888889, }, { - upperBound: 1, - count: 9.888888889, + UpperBound: 1, + Count: 9.888888889, }, { - upperBound: 2.5, - count: 9.888888889, + UpperBound: 2.5, + Count: 9.888888889, }, { - upperBound: 5, - count: 9.888888889, + UpperBound: 5, + Count: 9.888888889, }, { - upperBound: 10, - count: 9.888888889, + UpperBound: 10, + 
Count: 9.888888889, }, { - upperBound: 25, - count: 9.888888889, + UpperBound: 25, + Count: 9.888888889, }, { - upperBound: 50, - count: 9.888888889, + UpperBound: 50, + Count: 9.888888889, }, { - upperBound: 100, - count: 9.888888889, + UpperBound: 100, + Count: 9.888888889, }, { - upperBound: math.Inf(1), - count: 9.888888889, + UpperBound: math.Inf(1), + Count: 9.888888889, }, } }, @@ -246,53 +246,53 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { }, }, "real example 2 - non-monotonic": { - getInput: func() buckets { - return buckets{ + getInput: func() Buckets { + return Buckets{ { - upperBound: 0.005, - count: 9.6, + UpperBound: 0.005, + Count: 9.6, }, { - upperBound: 0.01, - count: 9.688888889, + UpperBound: 0.01, + Count: 9.688888889, }, { - upperBound: 0.025, - count: 9.755555556, + UpperBound: 0.025, + Count: 9.755555556, }, { - upperBound: 0.05, - count: 9.844444444, + UpperBound: 0.05, + Count: 9.844444444, }, { - upperBound: 0.1, - count: 9.888888889, + UpperBound: 0.1, + Count: 9.888888889, }, { - upperBound: 0.25, - count: 9.888888889, + UpperBound: 0.25, + Count: 9.888888889, }, { - upperBound: 0.5, - count: 9.888888889, + UpperBound: 0.5, + Count: 9.888888889, }, { - upperBound: 1, - count: 9.888888889, + UpperBound: 1, + Count: 9.888888889, }, { - upperBound: 2.5, - count: 9.888888889, + UpperBound: 2.5, + Count: 9.888888889, }, { - upperBound: 5, - count: 9.888888889, + UpperBound: 5, + Count: 9.888888889, }, { - upperBound: 10, - count: 9.888888889001, // Simulate the case there's a small imprecision in float64. + UpperBound: 10, + Count: 9.888888889001, // Simulate the case there's a small imprecision in float64. }, { - upperBound: 25, - count: 9.888888889, + UpperBound: 25, + Count: 9.888888889, }, { - upperBound: 50, - count: 9.888888888999, // Simulate the case there's a small imprecision in float64. + UpperBound: 50, + Count: 9.888888888999, // Simulate the case there's a small imprecision in float64. 
}, { - upperBound: 100, - count: 9.888888889, + UpperBound: 100, + Count: 9.888888889, }, { - upperBound: math.Inf(1), - count: 9.888888889, + UpperBound: math.Inf(1), + Count: 9.888888889, }, } }, @@ -308,7 +308,7 @@ func TestBucketQuantile_ForcedMonotonicity(t *testing.T) { } { t.Run(name, func(t *testing.T) { for q, v := range tc.expectedValues { - res, forced, fixed := bucketQuantile(q, tc.getInput()) + res, forced, fixed := BucketQuantile(q, tc.getInput()) require.Equal(t, tc.expectedForced, forced) require.Equal(t, tc.expectedFixed, fixed) require.InEpsilon(t, v, res, eps) From 12577e385103d10c481f58d3ced1b9c3942e9629 Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Thu, 14 Nov 2024 10:51:59 -0500 Subject: [PATCH 0985/1397] Add support for values unescaping on `/v1/label/:name/values` endpoint Signed-off-by: Owen Williams --- docs/querying/api.md | 32 +++++++++++++++++++++++++++----- web/api/v1/api.go | 4 ++++ web/api/v1/api_test.go | 30 +++++++++++++++++++++--------- 3 files changed, 52 insertions(+), 14 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 0352496f18..87de463288 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -433,18 +433,40 @@ URL query parameters: series from which to read the label values. Optional. - `limit=`: Maximum number of returned series. Optional. 0 means disabled. - The `data` section of the JSON response is a list of string label values. -This example queries for all label values for the `job` label: +This example queries for all label values for the `http_status_code` label: ```json -$ curl http://localhost:9090/api/v1/label/job/values +$ curl http://localhost:9090/api/v1/label/http_status_code/values { "status" : "success", "data" : [ - "node", - "prometheus" + "200", + "504" + ] +} +``` + +Label names can optionally be encoded using the Values Escaping method, and is necessary if a name includes the `/` character. To encode a name in this way: + +* Prepend the label with `U__`. 
+* Letters, numbers, and colons appear as-is. +* Convert single underscores to double underscores. +* For all other characters, use the UTF-8 codepoint as a hex integer, surrounded + by underscores. So ` ` becomes `_20_` and a `.` becomes `_2e_`. + + More information about text escaping can be found in the original UTF-8 [Proposal document](https://github.com/prometheus/proposals/blob/main/proposals/2023-08-21-utf8.md#text-escaping). + +This example queries for all label values for the `http.status_code` label: + +```json +$ curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values +{ + "status" : "success", + "data" : [ + "200", + "404" ] } ``` diff --git a/web/api/v1/api.go b/web/api/v1/api.go index c61e7984fb..633ca8393e 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -744,6 +744,10 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { ctx := r.Context() name := route.Param(ctx, "name") + if strings.HasPrefix(name, "U__") { + name = model.UnescapeName(name, model.ValueEncodingEscaping) + } + label := model.LabelName(name) if !label.IsValid() { return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}, nil, nil} diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 4cf5ec36ec..e89e284e4b 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -388,6 +388,7 @@ func TestEndpoints(t *testing.T) { test_metric4{foo="boo", dup="1"} 1+0x100 test_metric4{foo="boo"} 1+0x100 test_metric5{"host.name"="localhost"} 1+0x100 + test_metric5{"junk\n{},=: chars"="bar"} 1+0x100 `) t.Cleanup(func() { storage.Close() }) @@ -3031,6 +3032,17 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "localhost", }, }, + // Valid escaped utf8 name parameter for utf8 validation. 
+ { + endpoint: api.labelValues, + params: map[string]string{ + "name": "U__junk_0a__7b__7d__2c__3d_:_20__20_chars", + }, + nameValidationScheme: model.UTF8Validation, + response: []string{ + "bar", + }, + }, // Start and end before LabelValues starts. { endpoint: api.labelValues, @@ -3273,7 +3285,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E // Label names. { endpoint: api.labelNames, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Start and end before Label names starts. { @@ -3291,7 +3303,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"1"}, "end": []string{"100"}, }, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Start before Label names, end within Label names. { @@ -3300,7 +3312,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"-1"}, "end": []string{"10"}, }, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Start before Label names starts, end after Label names ends. @@ -3310,7 +3322,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"-1"}, "end": []string{"100000"}, }, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Start with bad data for Label names, end within Label names. 
{ @@ -3328,7 +3340,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "start": []string{"1"}, "end": []string{"1000000006"}, }, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Start and end after Label names ends. { @@ -3345,7 +3357,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "start": []string{"4"}, }, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Only provide End within Label names, don't provide a start time. { @@ -3353,7 +3365,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "end": []string{"20"}, }, - response: []string{"__name__", "dup", "foo", "host.name"}, + response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"}, }, // Label names with bad matchers. { @@ -3421,9 +3433,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E { endpoint: api.labelNames, query: url.Values{ - "limit": []string{"4"}, + "limit": []string{"5"}, }, - responseLen: 4, // API does not specify which particular values will come back. + responseLen: 5, // API does not specify which particular values will come back. warningsCount: 0, // No warnings if limit isn't exceeded. }, }...) 
From 36e0897f0f6cc456915860d7f3ac57fb60ecf6cb Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Thu, 28 Nov 2024 01:35:08 +0530 Subject: [PATCH 0986/1397] [BUGFIX] PromQL: Fix behaviour of `changes()` for mix of histograms and floats (#15469) PromQL: Fix behaviour of changes() for mix of histograms and floats --------- Signed-off-by: Neeraj Gartia --- promql/functions.go | 51 +++++++++++++++++++---- promql/promqltest/testdata/functions.test | 20 ++++++++- 2 files changed, 61 insertions(+), 10 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index cadac19b4b..83834afe54 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1429,20 +1429,53 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats + histograms := vals[0].(Matrix)[0].Histograms changes := 0 - - if len(floats) == 0 { - // TODO(beorn7): Only histogram values, still need to add support. + if len(floats) == 0 && len(histograms) == 0 { return enh.Out, nil } - prev := floats[0].F - for _, sample := range floats[1:] { - current := sample.F - if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { - changes++ + var prevSample, curSample Sample + for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); { + switch { + // Process a float sample if no histogram sample remains or its timestamp is earlier. + // Process a histogram sample if no float sample remains or its timestamp is earlier. 
+ case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T: + { + curSample.F = floats[iFloat].F + curSample.H = nil + iFloat++ + } + case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T: + { + curSample.H = histograms[iHistogram].H + iHistogram++ + } } - prev = current + // Skip the comparison for the first sample, just initialize prevSample. + if iFloat+iHistogram == 1 { + prevSample = curSample + continue + } + switch { + case prevSample.H == nil && curSample.H == nil: + { + if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) { + changes++ + } + } + case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil: + { + changes++ + } + case prevSample.H != nil && curSample.H != nil: + { + if !curSample.H.Equals(prevSample.H) { + changes++ + } + } + } + prevSample = curSample } return append(enh.Out, Sample{F: float64(changes)}), nil diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 1c832c89de..c8a2ea34a3 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -2,7 +2,10 @@ load 5m http_requests{path="/foo"} 1 2 3 0 1 0 0 1 2 0 http_requests{path="/bar"} 1 2 3 4 5 1 2 3 4 5 - http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1 + http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1 + http_requests_histogram{path="/foo"} {{schema:0 sum:1 count:1}}x9 + http_requests_histogram{path="/bar"} 0 0 0 0 0 0 0 0 {{schema:0 sum:1 count:1}} {{schema:0 sum:1 count:1}} + http_requests_histogram{path="/biz"} 0 1 0 2 0 3 0 {{schema:0 sum:1 count:1}} {{schema:0 sum:2 count:2}} {{schema:0 sum:1 count:1}} # Tests for resets(). 
eval instant at 50m resets(http_requests[5m]) @@ -69,6 +72,21 @@ eval instant at 50m changes((http_requests[50m])) eval instant at 50m changes(nonexistent_metric[50m]) +# Test for mix of floats and histograms. +# Because of bug #14172 we are not able to test more complex cases like below: +# 0 1 2 {{schema:0 sum:1 count:1}} 3 {{schema:0 sum:2 count:2}} 4 {{schema:0 sum:3 count:3}}. +eval instant at 50m changes(http_requests_histogram[5m]) + +eval instant at 50m changes(http_requests_histogram[6m]) + {path="/foo"} 0 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m changes(http_requests_histogram[60m]) + {path="/foo"} 0 + {path="/bar"} 1 + {path="/biz"} 9 + clear load 5m From 67d4be7a6ac83f62c684f1f42fef37a2dce9bf42 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Wed, 27 Nov 2024 21:15:04 +0100 Subject: [PATCH 0987/1397] prepare release 3.0.1 Signed-off-by: Jan Fajerski --- CHANGELOG.md | 8 ++++++++ VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package-lock.json | 4 ++-- web/ui/react-app/package.json | 2 +- 9 files changed, 25 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcfde9a988..83d6d4926c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,14 @@ ## unreleased +## 3.0.1 / 2024-11-28 + +The first bug fix release for Prometheus 3. + +* [BUGFIX] Promql: Make subqueries left open. #15431 +* [BUGFIX] Fix memory leak when query log is enabled. #15434 +* [BUGFIX] Support utf8 names on /v1/label/:name/values endpoint. #15399 + ## 3.0.0 / 2024-11-14 This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. 
The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users that want to upgrade we recommend to read through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/). diff --git a/VERSION b/VERSION index 4a36342fca..cb2b00e4f7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0 +3.0.1 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index dc32eee96d..66d8d93a17 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.300.0", + "version": "0.300.1", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0", + "@prometheus-io/codemirror-promql": "0.300.1", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 8fb6dc4ba7..dbb23c2522 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0", + "version": "0.300.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0", + "@prometheus-io/lezer-promql": "0.300.1", "lru-cache": "^11.0.1" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 6564d3fa56..d0ed463937 100644 --- 
a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0", + "version": "0.300.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f5ae7642b0..68cd3539ec 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.300.0", + "version": "0.300.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.300.0", + "version": "0.300.1", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.300.0", + "version": "0.300.1", "dependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.2", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.0", + "@prometheus-io/codemirror-promql": "0.300.1", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.59.0", @@ -155,10 +155,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.0", + "version": "0.300.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.0", + "@prometheus-io/lezer-promql": "0.300.1", "lru-cache": "^11.0.1" }, "devDependencies": { @@ -188,7 +188,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.300.0", + "version": "0.300.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.1", diff --git a/web/ui/package.json b/web/ui/package.json index ef5bdb81fc..37a98b0a20 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the 
Prometheus UI", - "version": "0.300.0", + "version": "0.300.1", "private": true, "scripts": { "build": "bash build_ui.sh --all", diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index bdc2d72bee..67425bcc7e 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -1,12 +1,12 @@ { "name": "@prometheus-io/app", - "version": "0.300.0", + "version": "0.300.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@prometheus-io/app", - "version": "0.300.0", + "version": "0.300.1", "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 30db488ccc..4ea2734934 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.300.0", + "version": "0.300.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.17.0", From 0de340c1b4527da0d1c71ec271b4fbc983d2fafa Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Thu, 28 Nov 2024 01:57:04 +0530 Subject: [PATCH 0988/1397] ignore histogram in dateWrapper Signed-off-by: Neeraj Gartia --- promql/functions.go | 4 ++++ promql/promqltest/testdata/functions.test | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/promql/functions.go b/promql/functions.go index 4be743040d..3fdb81e811 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1553,6 +1553,10 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } for _, el := range vals[0].(Vector) { + if el.H != nil { + // Ignore histogram sample. 
+ continue + } t := time.Unix(int64(el.F), 0).UTC() if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropMetricName() diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index e4caf0e363..e97ef9a174 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -901,6 +901,9 @@ eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) clear # Test time-related functions. +load 5m + histogram_sample {{schema:0 sum:1 count:1}} + eval instant at 0m year() {} 1970 @@ -982,6 +985,23 @@ eval instant at 0m days_in_month(vector(1454284800)) eval instant at 0m days_in_month(vector(1485907200)) {} 28 +# Test for histograms. +eval instant at 0m day_of_month(histogram_sample) + +eval instant at 0m day_of_week(histogram_sample) + +eval instant at 0m day_of_year(histogram_sample) + +eval instant at 0m days_in_month(histogram_sample) + +eval instant at 0m hour(histogram_sample) + +eval instant at 0m minute(histogram_sample) + +eval instant at 0m month(histogram_sample) + +eval instant at 0m year(histogram_sample) + clear # Test duplicate labelset in promql output. From e0104a6b7e8a7250c8b96a7026a633b650013f36 Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Sat, 23 Nov 2024 23:30:23 -0500 Subject: [PATCH 0989/1397] ref: JSONFileLogger slog handler, add scrape.FailureLogger interface Improves upon #15434, better resolves #15433. This commit introduces a few changes to ensure safer handling of the JSONFileLogger: - the JSONFileLogger struct now implements the slog.Handler interface, so it can directly be used to create slog Loggers. This pattern more closely aligns with upstream slog usage (which is generally based around handlers), as well as making it clear that devs are creating a whole new logger when interacting with it (vs silently modifying internal configs like it did previously). 
- updates the `promql.QueryLogger` interface to be a union of the methods of both the `io.Closer` interface and the `slog.Handler` interface. This allows for plugging in/using slog-compatible loggers other than the JSONFileLogger, if desired (ie, for downstream projects). - introduces new `scrape.FailureLogger` interface; just like `promql.QueryLogger`, it is a union of `io.Closer` and `slog.Handler` interfaces. Similar logic applies to reasoning. - updates tests where needed; have the `FakeQueryLogger` from promql's engine_test implement the `slog.Handler`, improve JSONFileLogger test suite, etc. Signed-off-by: TJ Hoplock --- promql/engine.go | 15 ++++++--- promql/engine_test.go | 44 ++++++++++++++++++++------ scrape/manager.go | 6 ++-- scrape/scrape.go | 27 ++++++++++------ scrape/scrape_test.go | 3 +- util/logging/file.go | 57 +++++++++++++++++++++++++--------- util/logging/file_test.go | 65 +++++++++++++++++++++++---------------- 7 files changed, 148 insertions(+), 69 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 13f8b06972..2ef0fabe7c 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/zeropool" ) @@ -123,12 +124,13 @@ type QueryEngine interface { NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) } +var _ QueryLogger = (*logging.JSONFileLogger)(nil) + // QueryLogger is an interface that can be used to log all the queries logged // by the engine. 
type QueryLogger interface { - Log(context.Context, slog.Level, string, ...any) - With(args ...any) - Close() error + slog.Handler + io.Closer } // A Query is derived from an a raw query string and can be run against an engine @@ -626,6 +628,9 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota defer func() { ng.queryLoggerLock.RLock() if l := ng.queryLogger; l != nil { + logger := slog.New(l) + f := make([]any, 0, 16) // Probably enough up front to not need to reallocate on append. + params := make(map[string]interface{}, 4) params["query"] = q.q if eq, ok := q.Statement().(*parser.EvalStmt); ok { @@ -634,7 +639,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f := []interface{}{"params", params} + f = append(f, "params", params) if err != nil { f = append(f, "error", err) } @@ -647,7 +652,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota f = append(f, k, v) } } - l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...) + logger.Info("promql query logged", f...) // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? 
// ng.metrics.queryLogFailures.Inc() // ng.logger.Error("can't log query", "err", err) diff --git a/promql/engine_test.go b/promql/engine_test.go index 8b8dc3909c..b8e7cd71c3 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "io" "log/slog" "math" "sort" @@ -2147,9 +2148,13 @@ func TestSubquerySelector(t *testing.T) { } } +var _ io.Closer = (*FakeQueryLogger)(nil) + +var _ slog.Handler = (*FakeQueryLogger)(nil) + type FakeQueryLogger struct { closed bool - logs []interface{} + logs []any attrs []any } @@ -2161,26 +2166,47 @@ func NewFakeQueryLogger() *FakeQueryLogger { } } -// It implements the promql.QueryLogger interface. func (f *FakeQueryLogger) Close() error { f.closed = true return nil } -// It implements the promql.QueryLogger interface. -func (f *FakeQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { +func (f *FakeQueryLogger) Enabled(ctx context.Context, level slog.Level) bool { + return true +} + +func (f *FakeQueryLogger) Handle(ctx context.Context, r slog.Record) error { // Test usage only really cares about existence of keyvals passed in // via args, just append in the log message before handling the // provided args and any embedded kvs added via `.With()` on f.attrs. - log := append([]any{msg}, args...) - log = append(log, f.attrs...) + log := append([]any{r.Message}, f.attrs...) f.attrs = f.attrs[:0] + + r.Attrs(func(a slog.Attr) bool { + log = append(log, a.Key, a.Value.Any()) + return true + }) + f.logs = append(f.logs, log...) + return nil } -// It implements the promql.QueryLogger interface. -func (f *FakeQueryLogger) With(args ...any) { - f.attrs = append(f.attrs, args...) 
+func (f *FakeQueryLogger) WithAttrs(attrs []slog.Attr) slog.Handler { + attrPairs := []any{} + for _, a := range attrs { + attrPairs = append(attrPairs, a.Key, a.Value.Any()) + } + + return &FakeQueryLogger{ + closed: f.closed, + logs: f.logs, + attrs: append(f.attrs, attrPairs...), + } +} + +func (f *FakeQueryLogger) WithGroup(name string) slog.Handler { + // We don't need to support groups for this fake handler in testing. + return f } func TestQueryLogger_basic(t *testing.T) { diff --git a/scrape/manager.go b/scrape/manager.go index 04da3162e6..5ef5dccb99 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -107,7 +107,7 @@ type Manager struct { scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error) - scrapeFailureLoggers map[string]*logging.JSONFileLogger + scrapeFailureLoggers map[string]FailureLogger targetSets map[string][]*targetgroup.Group buffers *pool.Pool @@ -249,7 +249,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) - scrapeFailureLoggers := map[string]*logging.JSONFileLogger{ + scrapeFailureLoggers := map[string]FailureLogger{ "": nil, // Emptying the file name sets the scrape logger to nil. } for _, scfg := range scfgs { @@ -257,7 +257,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { // We promise to reopen the file on each reload. 
var ( - logger *logging.JSONFileLogger + logger FailureLogger err error ) if m.newScrapeFailureLogger != nil { diff --git a/scrape/scrape.go b/scrape/scrape.go index eeab208d65..f445e8d2df 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -61,6 +61,15 @@ var AlignScrapeTimestamps = true var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName) +var _ FailureLogger = (*logging.JSONFileLogger)(nil) + +// FailureLogger is an interface that can be used to log all failed +// scrapes. +type FailureLogger interface { + slog.Handler + io.Closer +} + // scrapePool manages scrapes for sets of targets. type scrapePool struct { appendable storage.Appendable @@ -90,7 +99,7 @@ type scrapePool struct { metrics *scrapeMetrics - scrapeFailureLogger *logging.JSONFileLogger + scrapeFailureLogger FailureLogger scrapeFailureLoggerMtx sync.RWMutex } @@ -223,11 +232,11 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } -func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { +func (sp *scrapePool) SetScrapeFailureLogger(l FailureLogger) { sp.scrapeFailureLoggerMtx.Lock() defer sp.scrapeFailureLoggerMtx.Unlock() if l != nil { - l.With("job_name", sp.config.JobName) + l = slog.New(l).With("job_name", sp.config.JobName).Handler().(FailureLogger) } sp.scrapeFailureLogger = l @@ -238,7 +247,7 @@ func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { } } -func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger { +func (sp *scrapePool) getScrapeFailureLogger() FailureLogger { sp.scrapeFailureLoggerMtx.RLock() defer sp.scrapeFailureLoggerMtx.RUnlock() return sp.scrapeFailureLogger @@ -860,7 +869,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) - setScrapeFailureLogger(*logging.JSONFileLogger) + setScrapeFailureLogger(FailureLogger) stop() getCache() *scrapeCache 
disableEndOfRunStalenessMarkers() @@ -876,7 +885,7 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper l *slog.Logger - scrapeFailureLogger *logging.JSONFileLogger + scrapeFailureLogger FailureLogger scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int @@ -1267,11 +1276,11 @@ func newScrapeLoop(ctx context.Context, return sl } -func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) { +func (sl *scrapeLoop) setScrapeFailureLogger(l FailureLogger) { sl.scrapeFailureLoggerMtx.Lock() defer sl.scrapeFailureLoggerMtx.Unlock() if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { - l.With("target", ts.String()) + l = slog.New(l).With("target", ts.String()).Handler().(FailureLogger) } sl.scrapeFailureLogger = l } @@ -1421,7 +1430,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Log(context.Background(), slog.LevelError, scrapeErr.Error()) + slog.New(sl.scrapeFailureLogger).Error(scrapeErr.Error()) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index ce1686feca..571b27086c 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -55,7 +55,6 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/pool" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -252,7 +251,7 @@ type testLoop struct { timeout time.Duration } -func (l *testLoop) setScrapeFailureLogger(*logging.JSONFileLogger) { +func (l *testLoop) setScrapeFailureLogger(FailureLogger) { } func (l *testLoop) run(errc chan<- error) { diff --git a/util/logging/file.go 
b/util/logging/file.go index 9db7fb722b..27fdec2758 100644 --- a/util/logging/file.go +++ b/util/logging/file.go @@ -16,16 +16,22 @@ package logging import ( "context" "fmt" + "io" "log/slog" "os" "github.com/prometheus/common/promslog" ) -// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface. +var _ slog.Handler = (*JSONFileLogger)(nil) + +var _ io.Closer = (*JSONFileLogger)(nil) + +// JSONFileLogger represents a logger that writes JSON to a file. It implements +// the slog.Handler interface, as well as the io.Closer interface. type JSONFileLogger struct { - logger *slog.Logger - file *os.File + handler slog.Handler + file *os.File } // NewJSONFileLogger returns a new JSONFileLogger. @@ -42,24 +48,47 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { jsonFmt := &promslog.AllowedFormat{} _ = jsonFmt.Set("json") return &JSONFileLogger{ - logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}), - file: f, + handler: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}).Handler(), + file: f, }, nil } -// Close closes the underlying file. It implements the promql.QueryLogger interface. +// Close closes the underlying file. It implements the io.Closer interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } -// With calls the `With()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. -func (l *JSONFileLogger) With(args ...any) { - l.logger = l.logger.With(args...) +// Enabled returns true if and only if the internal slog.Handler is enabled. It +// implements the slog.Handler interface. +func (l *JSONFileLogger) Enabled(ctx context.Context, level slog.Level) bool { + return l.handler.Enabled(ctx, level) } -// Log calls the `Log()` method on the underlying `log/slog.Logger` with the -// provided msg and args. It implements the promql.QueryLogger interface. 
-func (l *JSONFileLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) { - l.logger.Log(ctx, level, msg, args...) +// Handle takes record created by an slog.Logger and forwards it to the +// internal slog.Handler for dispatching the log call to the backing file. It +// implements the slog.Handler interface. +func (l *JSONFileLogger) Handle(ctx context.Context, r slog.Record) error { + return l.handler.Handle(ctx, r.Clone()) +} + +// WithAttrs returns a new *JSONFileLogger with a new internal handler that has +// the provided attrs attached as attributes on all further log calls. It +// implements the slog.Handler interface. +func (l *JSONFileLogger) WithAttrs(attrs []slog.Attr) slog.Handler { + if len(attrs) == 0 { + return l + } + + return &JSONFileLogger{file: l.file, handler: l.handler.WithAttrs(attrs)} +} + +// WithGroup returns a new *JSONFileLogger with a new internal handler that has +// the provided group name attached, to group all other attributes added to the +// logger. It implements the slog.Handler interface. +func (l *JSONFileLogger) WithGroup(name string) slog.Handler { + if name == "" { + return l + } + + return &JSONFileLogger{file: l.file, handler: l.handler.WithGroup(name)} } diff --git a/util/logging/file_test.go b/util/logging/file_test.go index c9f7240fee..bd34bc2a3a 100644 --- a/util/logging/file_test.go +++ b/util/logging/file_test.go @@ -14,7 +14,6 @@ package logging import ( - "context" "log/slog" "os" "strings" @@ -24,6 +23,19 @@ import ( "github.com/stretchr/testify/require" ) +func getLogLines(t *testing.T, name string) []string { + content, err := os.ReadFile(name) + require.NoError(t, err) + + lines := strings.Split(string(content), "\n") + for i := len(lines) - 1; i >= 0; i-- { + if lines[i] == "" { + lines = append(lines[:i], lines[i+1:]...) 
+ } + } + return lines +} + func TestJSONFileLogger_basic(t *testing.T) { f, err := os.CreateTemp("", "logging") require.NoError(t, err) @@ -32,24 +44,23 @@ func TestJSONFileLogger_basic(t *testing.T) { require.NoError(t, os.Remove(f.Name())) }() - l, err := NewJSONFileLogger(f.Name()) + logHandler, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) - require.NotNil(t, l, "logger can't be nil") + require.NotNil(t, logHandler, "logger handler can't be nil") - l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") - require.NoError(t, err) - r := make([]byte, 1024) - _, err = f.Read(r) - require.NoError(t, err) + logger := slog.New(logHandler) + logger.Info("test", "hello", "world") - result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"file.go:\d+","msg":"test","hello":"world"}\n`, r) + r := getLogLines(t, f.Name()) + require.Len(t, r, 1, "expected 1 log line") + result, err := regexp.Match(`^{"time":"[^"]+","level":"INFO","source":"\w+.go:\d+","msg":"test","hello":"world"}`, []byte(r[0])) require.NoError(t, err) require.True(t, result, "unexpected content: %s", r) - err = l.Close() + err = logHandler.Close() require.NoError(t, err) - err = l.file.Close() + err = logHandler.file.Close() require.Error(t, err) require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed") } @@ -62,31 +73,31 @@ func TestJSONFileLogger_parallel(t *testing.T) { require.NoError(t, os.Remove(f.Name())) }() - l, err := NewJSONFileLogger(f.Name()) + logHandler, err := NewJSONFileLogger(f.Name()) require.NoError(t, err) - require.NotNil(t, l, "logger can't be nil") + require.NotNil(t, logHandler, "logger handler can't be nil") - l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") + logger := slog.New(logHandler) + logger.Info("test", "hello", "world") + + logHandler2, err := NewJSONFileLogger(f.Name()) + require.NoError(t, err) + require.NotNil(t, logHandler2, "logger handler can't be nil") + + logger2 := 
slog.New(logHandler2) + logger2.Info("test", "hello", "world") + + err = logHandler.Close() require.NoError(t, err) - l2, err := NewJSONFileLogger(f.Name()) - require.NoError(t, err) - require.NotNil(t, l, "logger can't be nil") - - l2.Log(context.Background(), slog.LevelInfo, "test", "hello", "world") - require.NoError(t, err) - - err = l.Close() - require.NoError(t, err) - - err = l.file.Close() + err = logHandler.file.Close() require.Error(t, err) require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed") - err = l2.Close() + err = logHandler2.Close() require.NoError(t, err) - err = l2.file.Close() + err = logHandler2.file.Close() require.Error(t, err) require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed") } From 96adc410ba13b05e361172926235264f3fa54a66 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Fri, 29 Nov 2024 16:26:34 +1100 Subject: [PATCH 0990/1397] tsdb/chunkenc: don't reuse custom value slices between histograms Signed-off-by: Charles Korn --- tsdb/chunkenc/float_histogram.go | 10 ++- tsdb/chunkenc/float_histogram_test.go | 46 +++++++++++++ tsdb/chunkenc/histogram.go | 12 +++- tsdb/chunkenc/histogram_test.go | 94 ++++++++++++++++++++++++++- 4 files changed, 158 insertions(+), 4 deletions(-) diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 1c7e2c3ace..0da00dcda4 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -976,6 +976,7 @@ func (it *floatHistogramIterator) Reset(b []byte) { it.atFloatHistogramCalled = false it.pBuckets, it.nBuckets = nil, nil it.pSpans, it.nSpans = nil, nil + it.customValues = nil } else { it.pBuckets, it.nBuckets = it.pBuckets[:0], it.nBuckets[:0] } @@ -1071,7 +1072,7 @@ func (it *floatHistogramIterator) Next() ValueType { // The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code, // so we don't need a separate single delta logic for the 
2nd sample. - // Recycle bucket and span slices that have not been returned yet. Otherwise, copy them. + // Recycle bucket, span and custom value slices that have not been returned yet. Otherwise, copy them. // We can always recycle the slices for leading and trailing bits as they are // never returned to the caller. if it.atFloatHistogramCalled { @@ -1104,6 +1105,13 @@ func (it *floatHistogramIterator) Next() ValueType { } else { it.nSpans = nil } + if len(it.customValues) > 0 { + newCustomValues := make([]float64, len(it.customValues)) + copy(newCustomValues, it.customValues) + it.customValues = newCustomValues + } else { + it.customValues = nil + } } tDod, err := readVarbitInt(&it.br) diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 1416b8d085..e99fd1c160 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -1368,6 +1368,52 @@ func TestFloatHistogramUniqueSpansAfterNext(t *testing.T) { require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") } +func TestFloatHistogramUniqueCustomValuesAfterNext(t *testing.T) { + // Create two histograms with the same schema and custom values. + h1 := &histogram.FloatHistogram{ + Schema: -53, + Count: 10, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4}, + CustomValues: []float64{10, 11, 12, 13}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewFloatHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendFloatHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendFloatHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. 
+ it := c.Iterator(nil) + require.Equal(t, ValFloatHistogram, it.Next()) + _, rh1 := it.AtFloatHistogram(nil) + + // Advance to the second histogram and retrieve it. + require.Equal(t, ValFloatHistogram, it.Next()) + _, rh2 := it.AtFloatHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.CustomValues, h1.CustomValues, "Returned custom values are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.CustomValues, h1.CustomValues, "Returned custom values are as expected") + + // Check that the spans and custom values for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.CustomValues[0], &rh2.CustomValues[0], "CustomValues should be unique between histograms") +} + func assertFirstFloatHistogramSampleHint(t *testing.T, chunk Chunk, expected histogram.CounterResetHint) { it := chunk.Iterator(nil) require.Equal(t, ValFloatHistogram, it.Next()) diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index bb747e135f..d2eec6b75a 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -1075,6 +1075,7 @@ func (it *histogramIterator) Reset(b []byte) { it.atHistogramCalled = false it.pBuckets, it.nBuckets = nil, nil it.pSpans, it.nSpans = nil, nil + it.customValues = nil } else { it.pBuckets = it.pBuckets[:0] it.nBuckets = it.nBuckets[:0] @@ -1082,6 +1083,7 @@ func (it *histogramIterator) Reset(b []byte) { if it.atFloatHistogramCalled { it.atFloatHistogramCalled = false it.pFloatBuckets, it.nFloatBuckets = nil, nil + it.customValues = nil } else { it.pFloatBuckets = it.pFloatBuckets[:0] it.nFloatBuckets = it.nFloatBuckets[:0] @@ -1187,8 +1189,7 @@ func (it *histogramIterator) Next() ValueType { // The case for the 2nd sample with single deltas is 
implicitly handled correctly with the double delta code, // so we don't need a separate single delta logic for the 2nd sample. - // Recycle bucket and span slices that have not been returned yet. Otherwise, copy them. - // copy them. + // Recycle bucket, span and custom value slices that have not been returned yet. Otherwise, copy them. if it.atFloatHistogramCalled || it.atHistogramCalled { if len(it.pSpans) > 0 { newSpans := make([]histogram.Span, len(it.pSpans)) @@ -1204,6 +1205,13 @@ func (it *histogramIterator) Next() ValueType { } else { it.nSpans = nil } + if len(it.customValues) > 0 { + newCustomValues := make([]float64, len(it.customValues)) + copy(newCustomValues, it.customValues) + it.customValues = newCustomValues + } else { + it.customValues = nil + } } if it.atHistogramCalled { diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 9ce2bf4e60..7e8807963b 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -1497,7 +1497,7 @@ func TestHistogramAppendOnlyErrors(t *testing.T) { }) } -func TestHistogramUniqueSpansAfterNext(t *testing.T) { +func TestHistogramUniqueSpansAfterNextWithAtHistogram(t *testing.T) { // Create two histograms with the same schema and spans. h1 := &histogram.Histogram{ Schema: 1, @@ -1599,6 +1599,98 @@ func TestHistogramUniqueSpansAfterNextWithAtFloatHistogram(t *testing.T) { require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") } +func TestHistogramUniqueCustomValuesAfterNextWithAtHistogram(t *testing.T) { + // Create two histograms with the same schema and custom values. + h1 := &histogram.Histogram{ + Schema: -53, + Count: 10, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, 3, 4}, + CustomValues: []float64{10, 11, 12, 13}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. 
+ c := NewHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, rh1 := it.AtHistogram(nil) + + // Advance to the second histogram and retrieve it. + require.Equal(t, ValHistogram, it.Next()) + _, rh2 := it.AtHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.CustomValues, h1.CustomValues, "Returned custom values are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.CustomValues, h1.CustomValues, "Returned custom values are as expected") + + // Check that the spans and custom values for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.CustomValues[0], &rh2.CustomValues[0], "CustomValues should be unique between histograms") +} + +func TestHistogramUniqueCustomValuesAfterNextWithAtFloatHistogram(t *testing.T) { + // Create two histograms with the same schema and custom values. + h1 := &histogram.Histogram{ + Schema: -53, + Count: 10, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, 3, 4}, + CustomValues: []float64{10, 11, 12, 13}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. 
+ c := NewHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, rh1 := it.AtFloatHistogram(nil) + + // Advance to the second histogram and retrieve it. + require.Equal(t, ValHistogram, it.Next()) + _, rh2 := it.AtFloatHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.CustomValues, h1.CustomValues, "Returned custom values are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.CustomValues, h1.CustomValues, "Returned custom values are as expected") + + // Check that the spans and custom values for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.CustomValues[0], &rh2.CustomValues[0], "CustomValues should be unique between histograms") +} + func BenchmarkAppendable(b *testing.B) { // Create a histogram with a bunch of spans and buckets. const ( From cd1f8ac129a289be8e7d98b6de57a9ba5814c406 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Fri, 29 Nov 2024 12:52:56 +0100 Subject: [PATCH 0991/1397] MemPostings: keep a map of label values slices (#15426) While investigating lock contention on `MemPostings`, we saw that lots of locking is happening in `LabelValues` and `PostingsForLabelsMatching`, both copying the label values slices while holding the mutex. This adds an extra map that holds an append-only label values slice for each one of the label names. Since the slice is append-only, it can be copied without holding the mutex. 
Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 169 +++++++++++++++++++++++++---------------- 1 file changed, 104 insertions(+), 65 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index a0e7d035ad..ea32ba5632 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -18,6 +18,7 @@ import ( "context" "encoding/binary" "fmt" + "maps" "math" "runtime" "slices" @@ -32,6 +33,8 @@ import ( "github.com/prometheus/prometheus/storage" ) +const exponentialSliceGrowthFactor = 2 + var allPostingsKey = labels.Label{} // AllPostingsKey returns the label key that is used to store the postings list of all existing IDs. @@ -55,15 +58,33 @@ var ensureOrderBatchPool = sync.Pool{ // EnsureOrder() must be called once before any reads are done. This allows for quick // unordered batch fills on startup. type MemPostings struct { - mtx sync.RWMutex - m map[string]map[string][]storage.SeriesRef + mtx sync.RWMutex + + // m holds the postings lists for each label-value pair, indexed first by label name, and then by label value. + // + // mtx must be held when interacting with m (the appropriate one for reading or writing). + // It is safe to retain a reference to a postings list after releasing the lock. + // + // BUG: There's currently a data race in addFor, which might modify the tail of the postings list: + // https://github.com/prometheus/prometheus/issues/15317 + m map[string]map[string][]storage.SeriesRef + + // lvs holds the label values for each label name. + // lvs[name] is essentially an unsorted append-only list of all keys in m[name] + // mtx must be held when interacting with lvs. + // Since it's append-only, it is safe to read the label values slice after releasing the lock. + lvs map[string][]string + + ordered bool } + +const defaultLabelNamesMapSize = 512 + // NewMemPostings returns a memPostings that's ready for reads and writes. 
func NewMemPostings() *MemPostings { return &MemPostings{ - m: make(map[string]map[string][]storage.SeriesRef, 512), + m: make(map[string]map[string][]storage.SeriesRef, defaultLabelNamesMapSize), + lvs: make(map[string][]string, defaultLabelNamesMapSize), ordered: true, } } @@ -72,7 +93,8 @@ func NewMemPostings() *MemPostings { // until EnsureOrder() was called once. func NewUnorderedMemPostings() *MemPostings { return &MemPostings{ - m: make(map[string]map[string][]storage.SeriesRef, 512), + m: make(map[string]map[string][]storage.SeriesRef, defaultLabelNamesMapSize), + lvs: make(map[string][]string, defaultLabelNamesMapSize), ordered: false, } } @@ -80,16 +102,19 @@ func NewUnorderedMemPostings() *MemPostings { // Symbols returns an iterator over all unique name and value strings, in order. func (p *MemPostings) Symbols() StringIter { p.mtx.RLock() + // Make a quick clone of the map to avoid holding the lock while iterating. + // It's safe to use the values of the map after releasing the lock, as they're append-only slices. + lvs := maps.Clone(p.lvs) + p.mtx.RUnlock() // Add all the strings to a map to de-duplicate. - symbols := make(map[string]struct{}, 512) - for n, e := range p.m { + symbols := make(map[string]struct{}, defaultLabelNamesMapSize) + for n, labelValues := range lvs { symbols[n] = struct{}{} - for v := range e { + for _, v := range labelValues { symbols[v] = struct{}{} } } - p.mtx.RUnlock() res := make([]string, 0, len(symbols)) for k := range symbols { @@ -145,13 +170,14 @@ func (p *MemPostings) LabelNames() []string { // LabelValues returns label values for the given name. func (p *MemPostings) LabelValues(_ context.Context, name string) []string { p.mtx.RLock() - defer p.mtx.RUnlock() + values := p.lvs[name] + p.mtx.RUnlock() - values := make([]string, 0, len(p.m[name])) - for v := range p.m[name] { - values = append(values, v) - } - return values + // The slice from p.lvs[name] is shared between all readers, and it is append-only. 
+ // Since it's shared, we need to make a copy of it before returning it to make + // sure that no caller modifies the original one by sorting it or filtering it. + // Since it's append-only, we can do this while not holding the mutex anymore. + return slices.Clone(values) } // PostingsStats contains cardinality based statistics for postings. @@ -294,6 +320,7 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma p.mtx.Lock() defer p.mtx.Unlock() + affectedLabelNames := map[string]struct{}{} process := func(l labels.Label) { orig := p.m[l.Name][l.Value] repl := make([]storage.SeriesRef, 0, len(orig)) @@ -306,10 +333,7 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma p.m[l.Name][l.Value] = repl } else { delete(p.m[l.Name], l.Value) - // Delete the key if we removed all values. - if len(p.m[l.Name]) == 0 { - delete(p.m, l.Name) - } + affectedLabelNames[l.Name] = struct{}{} } } @@ -323,22 +347,52 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma // Note that a read query will most likely want to read multiple postings lists, say 5, 10 or 20 (depending on the number of matchers) // And that read query will most likely evaluate only one of those matchers before we unpause here, so we want to pause often. if i%512 == 0 { - p.mtx.Unlock() - // While it's tempting to just do a `time.Sleep(time.Millisecond)` here, - // it wouldn't ensure use that readers actually were able to get the read lock, - // because if there are writes waiting on same mutex, readers won't be able to get it. - // So we just grab one RLock ourselves. - p.mtx.RLock() - // We shouldn't wait here, because we would be blocking a potential write for no reason. - // Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock. - p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section. 
- // Now we can wait a little bit just to increase the chance of a reader getting the lock. - // If we were deleting 100M series here, pausing every 512 with 1ms sleeps would be an extra of 200s, which is negligible. - time.Sleep(time.Millisecond) - p.mtx.Lock() + p.unlockWaitAndLockAgain() } } process(allPostingsKey) + + // Now we need to update the label values slices. + i = 0 + for name := range affectedLabelNames { + i++ + // From time to time we want some readers to go through and read their postings. + if i%512 == 0 { + p.unlockWaitAndLockAgain() + } + + if len(p.m[name]) == 0 { + // Delete the label name key if we deleted all values. + delete(p.m, name) + delete(p.lvs, name) + continue + } + + // Create the new slice with enough room to grow without reallocating. + // We have deleted values here, so there's definitely some churn, so be prepared for it. + lvs := make([]string, 0, exponentialSliceGrowthFactor*len(p.m[name])) + for v := range p.m[name] { + lvs = append(lvs, v) + } + p.lvs[name] = lvs + } +} + +// unlockWaitAndLockAgain will unlock an already locked p.mtx.Lock() and then wait a little bit before locking it again, +// letting the RLock()-waiting goroutines get the lock. +func (p *MemPostings) unlockWaitAndLockAgain() { + p.mtx.Unlock() + // While it's tempting to just do a `time.Sleep(time.Millisecond)` here, + // it wouldn't ensure that readers actually were able to get the read lock, + // because if there are writes waiting on same mutex, readers won't be able to get it. + // So we just grab one RLock ourselves. + p.mtx.RLock() + // We shouldn't wait here, because we would be blocking a potential write for no reason. + // Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock. + p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section. + // Now we can wait a little bit just to increase the chance of a reader getting the lock. 
+ time.Sleep(time.Millisecond) + p.mtx.Lock() } // Iter calls f for each postings list. It aborts if f returns an error and returns it. @@ -370,7 +424,7 @@ func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) { func appendWithExponentialGrowth[T any](a []T, v T) []T { if cap(a) < len(a)+1 { - newList := make([]T, len(a), len(a)*2+1) + newList := make([]T, len(a), len(a)*exponentialSliceGrowthFactor+1) copy(newList, a) a = newList } @@ -383,7 +437,11 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { nm = map[string][]storage.SeriesRef{} p.m[l.Name] = nm } - list := appendWithExponentialGrowth(nm[l.Value], id) + vm, ok := nm[l.Value] + if !ok { + p.lvs[l.Name] = appendWithExponentialGrowth(p.lvs[l.Name], l.Value) + } + list := appendWithExponentialGrowth(vm, id) nm[l.Value] = list if !p.ordered { @@ -402,25 +460,27 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { } func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { - // We'll copy the values into a slice and then match over that, + // We'll take the label values slice and then match over that, // this way we don't need to hold the mutex while we're matching, // which can be slow (seconds) if the match function is a huge regex. // Holding this lock prevents new series from being added (slows down the write path) // and blocks the compaction process. - vals := p.labelValues(name) - for i, count := 0, 1; i < len(vals); count++ { - if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + // + // We just need to make sure we don't modify the slice we took, + // so we'll append matching values to a different one. 
+ p.mtx.RLock() + readOnlyLabelValues := p.lvs[name] + p.mtx.RUnlock() + + vals := make([]string, 0, len(readOnlyLabelValues)) + for i, v := range readOnlyLabelValues { + if i%checkContextEveryNIterations == 0 && ctx.Err() != nil { return ErrPostings(ctx.Err()) } - if match(vals[i]) { - i++ - continue + if match(v) { + vals = append(vals, v) } - - // Didn't match, bring the last value to this position, make the slice shorter and check again. - // The order of the slice doesn't matter as it comes from a map iteration. - vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1] } // If none matched (or this label had no values), no need to grab the lock again. @@ -469,27 +529,6 @@ func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string return Merge(ctx, its...) } -// labelValues returns a slice of label values for the given label name. -// It will take the read lock. -func (p *MemPostings) labelValues(name string) []string { - p.mtx.RLock() - defer p.mtx.RUnlock() - - e := p.m[name] - if len(e) == 0 { - return nil - } - - vals := make([]string, 0, len(e)) - for v, srs := range e { - if len(srs) > 0 { - vals = append(vals, v) - } - } - - return vals -} - // ExpandPostings returns the postings expanded as a slice. 
func ExpandPostings(p Postings) (res []storage.SeriesRef, err error) { for p.Next() { From ffabc048295680c96abf13ced2bf80eccb11ab1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 29 Nov 2024 15:12:57 +0100 Subject: [PATCH 0992/1397] Make the behavior configurable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- config/config.go | 1 + config/config_test.go | 14 +++ ...allow_service_name_in_target_info.good.yml | 2 + .../prometheusremotewrite/helper.go | 17 +++- .../prometheusremotewrite/helper_test.go | 43 ++++++++- .../prometheusremotewrite/histograms.go | 1 + .../prometheusremotewrite/metrics_to_prw.go | 1 + .../metrics_to_prw_test.go | 93 +++++++++++++------ .../number_data_points.go | 2 + storage/remote/write_handler.go | 1 + 10 files changed, 143 insertions(+), 32 deletions(-) create mode 100644 config/testdata/otlp_allow_service_name_in_target_info.good.yml diff --git a/config/config.go b/config/config.go index a94342d841..dfa3c7cd61 100644 --- a/config/config.go +++ b/config/config.go @@ -1431,6 +1431,7 @@ var ( type OTLPConfig struct { PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` + ServiceNameInTargetInfo bool `yaml:"service_name_in_target_info,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
diff --git a/config/config_test.go b/config/config_test.go index 77cbf9b2eb..2fed3a06ee 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1554,6 +1554,20 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { }) } +func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) { + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_allow_service_name_in_target_info.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.Equal(t, true, got.OTLPConfig.ServiceNameInTargetInfo) + }) +} + func TestOTLPAllowUTF8(t *testing.T) { t.Run("good config", func(t *testing.T) { fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml") diff --git a/config/testdata/otlp_allow_service_name_in_target_info.good.yml b/config/testdata/otlp_allow_service_name_in_target_info.good.yml new file mode 100644 index 0000000000..3bd389d2eb --- /dev/null +++ b/config/testdata/otlp_allow_service_name_in_target_info.good.yml @@ -0,0 +1,2 @@ +otlp: + service_name_in_target_info: true diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 89e00f3759..e15ca4b56b 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -22,6 +22,7 @@ import ( "fmt" "log" "math" + "slices" "sort" "strconv" "unicode/utf8" @@ -116,7 +117,7 @@ var seps = []byte{'\xff'} // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. 
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings, - logOnOverwrite bool, extras ...string) []prompb.Label { + ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) @@ -146,7 +147,9 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting // XXX: Should we always drop service namespace/service name/service instance ID from the labels // (as they get mapped to other Prometheus labels)? attributes.Range(func(key string, value pcommon.Value) bool { - labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + if !slices.Contains(ignoreAttrs, key) { + labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + } return true }) sort.Stable(ByLabelName(labels)) @@ -248,7 +251,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings, false) + baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) // If the sum is unset, it indicates the _sum metric point should be // omitted @@ -448,7 +451,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings, false) + baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) // treat sum as a sample in an individual TimeSeries sum := &prompb.Sample{ @@ -597,7 +600,11 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta } settings.PromoteResourceAttributes = nil - labels := createAttributes(resource, 
attributes, settings, false, model.MetricNameLabel, name) + if settings.ServiceNameInTargetInfo { + // Do not pass identifying attributes as ignoreAttrs below. + identifyingAttrs = nil + } + labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name) haveIdentifier := false for _, l := range labels { if l.Name == model.JobLabel || l.Name == model.InstanceLabel { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index b3b93dcc06..b4bc704d4e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -49,15 +49,44 @@ func TestCreateAttributes(t *testing.T) { } attrs := pcommon.NewMap() attrs.PutStr("metric-attr", "metric value") + attrs.PutStr("metric-attr-other", "metric value other") testCases := []struct { name string promoteResourceAttributes []string + ignoreAttrs []string expectedLabels []prompb.Label }{ { name: "Successful conversion without resource attribute promotion", promoteResourceAttributes: nil, + expectedLabels: []prompb.Label{ + { + Name: "__name__", + Value: "test_metric", + }, + { + Name: "instance", + Value: "service ID", + }, + { + Name: "job", + Value: "service name", + }, + { + Name: "metric_attr", + Value: "metric value", + }, + { + Name: "metric_attr_other", + Value: "metric value other", + }, + }, + }, + { + name: "Successful conversion with some attributes ignored", + promoteResourceAttributes: nil, + ignoreAttrs: []string{"metric-attr-other"}, expectedLabels: []prompb.Label{ { Name: "__name__", @@ -97,6 +126,10 @@ func TestCreateAttributes(t *testing.T) { Name: "metric_attr", Value: "metric value", }, + { + Name: "metric_attr_other", + Value: "metric value other", + }, { Name: "existent_attr", Value: "resource value", @@ -127,6 +160,10 @@ func TestCreateAttributes(t *testing.T) { Name: 
"metric_attr", Value: "metric value", }, + { + Name: "metric_attr_other", + Value: "metric value other", + }, }, }, { @@ -153,6 +190,10 @@ func TestCreateAttributes(t *testing.T) { Name: "metric_attr", Value: "metric value", }, + { + Name: "metric_attr_other", + Value: "metric value other", + }, }, }, } @@ -161,7 +202,7 @@ func TestCreateAttributes(t *testing.T) { settings := Settings{ PromoteResourceAttributes: tc.promoteResourceAttributes, } - lbls := createAttributes(resource, attrs, settings, false, model.MetricNameLabel, "test_metric") + lbls := createAttributes(resource, attrs, settings, tc.ignoreAttrs, false, model.MetricNameLabel, "test_metric") assert.ElementsMatch(t, lbls, tc.expectedLabels) }) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index dfe0c9f90e..8349d4f907 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -54,6 +54,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont resource, pt.Attributes(), settings, + nil, true, model.MetricNameLabel, promName, diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 63862c4a70..1979b9185a 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -39,6 +39,7 @@ type Settings struct { AddMetricSuffixes bool AllowUTF8 bool PromoteResourceAttributes []string + ServiceNameInTargetInfo bool } // PrometheusConverter converts from OTel write format to Prometheus remote write format. 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index 64d0ebd6f5..2bb29ecd7a 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -28,41 +28,73 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) func TestFromMetrics(t *testing.T) { - t.Run("successful", func(t *testing.T) { - converter := NewPrometheusConverter() - payload := createExportRequest(5, 128, 128, 2, 0) - var expMetadata []prompb.MetricMetadata - resourceMetricsSlice := payload.Metrics().ResourceMetrics() - for i := 0; i < resourceMetricsSlice.Len(); i++ { - scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics() - for j := 0; j < scopeMetricsSlice.Len(); j++ { - metricSlice := scopeMetricsSlice.At(j).Metrics() - for k := 0; k < metricSlice.Len(); k++ { - metric := metricSlice.At(k) - promName := prometheustranslator.BuildCompliantName(metric, "", false, false) - expMetadata = append(expMetadata, prompb.MetricMetadata{ - Type: otelMetricTypeToPromMetricType(metric), - MetricFamilyName: promName, - Help: metric.Description(), - Unit: metric.Unit(), - }) + + for _, serviceNameInTargetInfo := range []bool{false, true} { + t.Run(fmt.Sprintf("successful/keepIdentifyingAttributes=%v", serviceNameInTargetInfo), func(t *testing.T) { + converter := NewPrometheusConverter() + payload := createExportRequest(5, 128, 128, 2, 0) + var expMetadata []prompb.MetricMetadata + resourceMetricsSlice := payload.Metrics().ResourceMetrics() + for i := 0; i < resourceMetricsSlice.Len(); i++ { + scopeMetricsSlice := 
resourceMetricsSlice.At(i).ScopeMetrics() + for j := 0; j < scopeMetricsSlice.Len(); j++ { + metricSlice := scopeMetricsSlice.At(j).Metrics() + for k := 0; k < metricSlice.Len(); k++ { + metric := metricSlice.At(k) + promName := prometheustranslator.BuildCompliantName(metric, "", false, false) + expMetadata = append(expMetadata, prompb.MetricMetadata{ + Type: otelMetricTypeToPromMetricType(metric), + MetricFamilyName: promName, + Help: metric.Description(), + Unit: metric.Unit(), + }) + } } } - } - annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{}) - require.NoError(t, err) - require.Empty(t, annots) + annots, err := converter.FromMetrics( + context.Background(), + payload.Metrics(), + Settings{ServiceNameInTargetInfo: serviceNameInTargetInfo}, + ) + require.NoError(t, err) + require.Empty(t, annots) - if diff := cmp.Diff(expMetadata, converter.Metadata()); diff != "" { - t.Errorf("mismatch (-want +got):\n%s", diff) - } - }) + if diff := cmp.Diff(expMetadata, converter.Metadata()); diff != "" { + t.Errorf("mismatch (-want +got):\n%s", diff) + } + + ts := converter.TimeSeries() + require.Len(t, ts, 1408+1) // +1 for the target_info. 
+ + target_info_count := 0 + for _, s := range ts { + b := labels.NewScratchBuilder(2) + lbls := s.ToLabels(&b, nil) + if lbls.Get(labels.MetricName) == "target_info" { + target_info_count++ + require.Equal(t, "test-namespace/test-service", lbls.Get("job")) + require.Equal(t, "id1234", lbls.Get("instance")) + if serviceNameInTargetInfo { + require.Equal(t, "test-service", lbls.Get("service_name")) + require.Equal(t, "test-namespace", lbls.Get("service_namespace")) + require.Equal(t, "id1234", lbls.Get("service_instance_id")) + } else { + require.False(t, lbls.Has("service_name")) + require.False(t, lbls.Has("service_namespace")) + require.False(t, lbls.Has("service_instance_id")) + } + } + } + require.Equal(t, 1, target_info_count) + }) + } t.Run("context cancellation", func(t *testing.T) { converter := NewPrometheusConverter() @@ -169,6 +201,15 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou rm := request.Metrics().ResourceMetrics().AppendEmpty() generateAttributes(rm.Resource().Attributes(), "resource", resourceAttributeCount) + // Fake some resource attributes. 
+ for k, v := range map[string]string{ + "service.name": "test-service", + "service.namespace": "test-namespace", + "service.instance.id": "id1234", + } { + rm.Resource().Attributes().PutStr(k, v) + } + metrics := rm.ScopeMetrics().AppendEmpty().Metrics() ts := pcommon.NewTimestampFromTime(time.Now()) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 035c0676c4..6cdab450e1 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -40,6 +40,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data resource, pt.Attributes(), settings, + nil, true, model.MetricNameLabel, name, @@ -75,6 +76,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo resource, pt.Attributes(), settings, + nil, true, model.MetricNameLabel, name, diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index f719ea6a1f..67c7f7627b 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -515,6 +515,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { AddMetricSuffixes: true, AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, + ServiceNameInTargetInfo: otlpCfg.ServiceNameInTargetInfo, }) if err != nil { h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) From 174f828d7008c3189e5740e60461469762bc1bd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 29 Nov 2024 15:19:14 +0100 Subject: [PATCH 0993/1397] Rename to keep_identifying_resource_attributes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- 
config/config.go | 6 +++--- config/config_test.go | 4 ++-- ...keep_identifying_resource_attributes.good.yml | 2 ++ ...lp_allow_service_name_in_target_info.good.yml | 2 -- .../prometheusremotewrite/helper.go | 2 +- .../prometheusremotewrite/metrics_to_prw.go | 16 ++++++++-------- .../prometheusremotewrite/metrics_to_prw_test.go | 8 ++++---- storage/remote/write_handler.go | 8 ++++---- 8 files changed, 24 insertions(+), 24 deletions(-) create mode 100644 config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml delete mode 100644 config/testdata/otlp_allow_service_name_in_target_info.good.yml diff --git a/config/config.go b/config/config.go index dfa3c7cd61..86d8563536 100644 --- a/config/config.go +++ b/config/config.go @@ -1429,9 +1429,9 @@ var ( // OTLPConfig is the configuration for writing to the OTLP endpoint. type OTLPConfig struct { - PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` - TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` - ServiceNameInTargetInfo bool `yaml:"service_name_in_target_info,omitempty"` + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` + KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
diff --git a/config/config_test.go b/config/config_test.go index 2fed3a06ee..01eeb6235e 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1556,7 +1556,7 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) { func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) { t.Run("good config", func(t *testing.T) { - want, err := LoadFile(filepath.Join("testdata", "otlp_allow_service_name_in_target_info.good.yml"), false, promslog.NewNopLogger()) + want, err := LoadFile(filepath.Join("testdata", "otlp_allow_keep_identifying_resource_attributes.good.yml"), false, promslog.NewNopLogger()) require.NoError(t, err) out, err := yaml.Marshal(want) @@ -1564,7 +1564,7 @@ func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) { var got Config require.NoError(t, yaml.UnmarshalStrict(out, &got)) - require.Equal(t, true, got.OTLPConfig.ServiceNameInTargetInfo) + require.Equal(t, true, got.OTLPConfig.KeepIdentifyingResourceAttributes) }) } diff --git a/config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml b/config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml new file mode 100644 index 0000000000..63151e2a77 --- /dev/null +++ b/config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml @@ -0,0 +1,2 @@ +otlp: + keep_identifying_resource_attributes: true diff --git a/config/testdata/otlp_allow_service_name_in_target_info.good.yml b/config/testdata/otlp_allow_service_name_in_target_info.good.yml deleted file mode 100644 index 3bd389d2eb..0000000000 --- a/config/testdata/otlp_allow_service_name_in_target_info.good.yml +++ /dev/null @@ -1,2 +0,0 @@ -otlp: - service_name_in_target_info: true diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index e15ca4b56b..316dd60c63 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -600,7 +600,7 @@ 
func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta } settings.PromoteResourceAttributes = nil - if settings.ServiceNameInTargetInfo { + if settings.KeepIdentifyingResourceAttributes { // Do not pass identifying attributes as ignoreAttrs below. identifyingAttrs = nil } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 1979b9185a..6779c9ed80 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -32,14 +32,14 @@ import ( ) type Settings struct { - Namespace string - ExternalLabels map[string]string - DisableTargetInfo bool - ExportCreatedMetric bool - AddMetricSuffixes bool - AllowUTF8 bool - PromoteResourceAttributes []string - ServiceNameInTargetInfo bool + Namespace string + ExternalLabels map[string]string + DisableTargetInfo bool + ExportCreatedMetric bool + AddMetricSuffixes bool + AllowUTF8 bool + PromoteResourceAttributes []string + KeepIdentifyingResourceAttributes bool } // PrometheusConverter converts from OTel write format to Prometheus remote write format. 
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index 2bb29ecd7a..7c9699a7e4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -35,8 +35,8 @@ import ( func TestFromMetrics(t *testing.T) { - for _, serviceNameInTargetInfo := range []bool{false, true} { - t.Run(fmt.Sprintf("successful/keepIdentifyingAttributes=%v", serviceNameInTargetInfo), func(t *testing.T) { + for _, keepIdentifyingResourceAttributes := range []bool{false, true} { + t.Run(fmt.Sprintf("successful/keepIdentifyingAttributes=%v", keepIdentifyingResourceAttributes), func(t *testing.T) { converter := NewPrometheusConverter() payload := createExportRequest(5, 128, 128, 2, 0) var expMetadata []prompb.MetricMetadata @@ -61,7 +61,7 @@ func TestFromMetrics(t *testing.T) { annots, err := converter.FromMetrics( context.Background(), payload.Metrics(), - Settings{ServiceNameInTargetInfo: serviceNameInTargetInfo}, + Settings{KeepIdentifyingResourceAttributes: keepIdentifyingResourceAttributes}, ) require.NoError(t, err) require.Empty(t, annots) @@ -81,7 +81,7 @@ func TestFromMetrics(t *testing.T) { target_info_count++ require.Equal(t, "test-namespace/test-service", lbls.Get("job")) require.Equal(t, "id1234", lbls.Get("instance")) - if serviceNameInTargetInfo { + if keepIdentifyingResourceAttributes { require.Equal(t, "test-service", lbls.Get("service_name")) require.Equal(t, "test-namespace", lbls.Get("service_namespace")) require.Equal(t, "id1234", lbls.Get("service_instance_id")) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 67c7f7627b..afb50ef265 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -512,10 +512,10 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { converter 
:= otlptranslator.NewPrometheusConverter() annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{ - AddMetricSuffixes: true, - AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, - PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, - ServiceNameInTargetInfo: otlpCfg.ServiceNameInTargetInfo, + AddMetricSuffixes: true, + AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, + PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, + KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, }) if err != nil { h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) From d33d6f7db20b03c94b2c303bfb0ad269e5f6d146 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 29 Nov 2024 15:19:45 +0100 Subject: [PATCH 0994/1397] Lint fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- config/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config_test.go b/config/config_test.go index 01eeb6235e..4b5b11a9fd 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1564,7 +1564,7 @@ func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) { var got Config require.NoError(t, yaml.UnmarshalStrict(out, &got)) - require.Equal(t, true, got.OTLPConfig.KeepIdentifyingResourceAttributes) + require.True(t, got.OTLPConfig.KeepIdentifyingResourceAttributes) }) } From 8ce9e20001dcc154323a401e2147a3736c1f3626 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Fri, 29 Nov 2024 15:23:35 +0100 Subject: [PATCH 0995/1397] Add documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- docs/configuration/configuration.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3ec15f9b10..015c3d777a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -182,6 +182,10 @@ otlp: # It preserves all special characters like dots, but it still add required suffixes # for units and _total like in UnderscoreEscapingWithSuffixes. [ translation_strategy: | default = "UnderscoreEscapingWithSuffixes" ] + # Enables adding "service.name", "service.namespace" and "service.instance.id" + # resource attributes to the "target_info" metric, on top of conveting + # them into the "instance" and "job" labels. + [ keep_identifying_resource_attributes: | default = false] # Settings related to the remote read feature. remote_read: From f5c7ef3d9d0df710f053dd1dcf54f39364e70a2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Mon, 25 Nov 2024 09:27:50 +0100 Subject: [PATCH 0996/1397] Fix typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- docs/configuration/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 015c3d777a..32da2c61c6 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -183,7 +183,7 @@ otlp: # for units and _total like in UnderscoreEscapingWithSuffixes. [ translation_strategy: | default = "UnderscoreEscapingWithSuffixes" ] # Enables adding "service.name", "service.namespace" and "service.instance.id" - # resource attributes to the "target_info" metric, on top of conveting + # resource attributes to the "target_info" metric, on top of converting # them into the "instance" and "job" labels. 
[ keep_identifying_resource_attributes: | default = false] From 1b2cf7edcfd319eb741793ebbc777857cb69dca9 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Sat, 30 Nov 2024 21:57:13 +0100 Subject: [PATCH 0997/1397] Update storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go Co-authored-by: Arve Knudsen Signed-off-by: George Krajcsovits --- .../otlptranslator/prometheusremotewrite/metrics_to_prw_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index 7c9699a7e4..05abc7743f 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -34,7 +34,6 @@ import ( ) func TestFromMetrics(t *testing.T) { - for _, keepIdentifyingResourceAttributes := range []bool{false, true} { t.Run(fmt.Sprintf("successful/keepIdentifyingAttributes=%v", keepIdentifyingResourceAttributes), func(t *testing.T) { converter := NewPrometheusConverter() From e10bbf0a84d59a9f20144ed578c9afa7079dbacd Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sat, 30 Nov 2024 22:26:09 -0800 Subject: [PATCH 0998/1397] hot reload always_scrape_classic_histograms and convert_classic_histograms_to_nhcb configs properly Signed-off-by: Ben Ye --- scrape/scrape.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scrape/scrape.go b/scrape/scrape.go index eeab208d65..86405e0dc9 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -330,6 +330,8 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() + alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB ) validationScheme 
:= model.UTF8Validation @@ -377,6 +379,8 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { timeout: timeout, validationScheme: validationScheme, fallbackScrapeProtocol: fallbackScrapeProtocol, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, }) ) if err != nil { From 90e832c861df33e17b7240ddf1d57441860953dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 2 Dec 2024 16:08:37 +0200 Subject: [PATCH 0999/1397] scrape: fix nil panic after scrape loop reload MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't forget to set `metrics` field as otherwise scraping will lead to a nil panic in case the body size limit is reached. Signed-off-by: Giedrius Statkevičius --- scrape/scrape.go | 1 + scrape/scrape_test.go | 55 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/scrape/scrape.go b/scrape/scrape.go index eeab208d65..47ed73c051 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -359,6 +359,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), + metrics: sp.metrics, } newLoop = sp.newLoop(scrapeLoopOptions{ target: t, diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index ce1686feca..e8494e2db1 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -4794,3 +4794,58 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha } })), scrapedTwice } + +type srvSameResp struct { + resp []byte +} + +func (s *srvSameResp) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Write(s.resp) +} + +func TestTargetScraperSegfault(t *testing.T) { + emptySrv := &srvSameResp{ + resp: []byte{0x42, 0x42}, + } + h := httptest.NewServer(emptySrv) + t.Cleanup(h.Close) + + cfg := 
&config.ScrapeConfig{ + BodySizeLimit: 1, + JobName: "test", + Scheme: "http", + ScrapeInterval: model.Duration(100 * time.Millisecond), + ScrapeTimeout: model.Duration(100 * time.Millisecond), + EnableCompression: false, + ServiceDiscoveryConfigs: discovery.Configs{ + &discovery.StaticConfig{ + { + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(h.URL)}}, + }, + }, + }, + } + + p, err := newScrapePool( + cfg, + &nopAppendable{}, + 0, + nil, + pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), + &Options{}, + newTestScrapeMetrics(t), + ) + require.NoError(t, err) + t.Cleanup(p.stop) + + p.Sync([]*targetgroup.Group{ + { + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(strings.TrimPrefix(h.URL, "http://"))}}, + Source: "test", + }, + }) + + require.NoError(t, p.reload(cfg)) + + <-time.After(1 * time.Second) +} From 4d54c304f8fa7359a2f62250f15a7b3b25e8dd2b Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Mon, 2 Dec 2024 00:57:33 -0500 Subject: [PATCH 1000/1397] ref: make query logger more efficient by building list of attrs Addresses PR feedback to make query logger more efficient. Signed-off-by: TJ Hoplock --- promql/engine.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 2ef0fabe7c..00a142b0c0 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -629,7 +629,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota ng.queryLoggerLock.RLock() if l := ng.queryLogger; l != nil { logger := slog.New(l) - f := make([]any, 0, 16) // Probably enough up front to not need to reallocate on append. + f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append. params := make(map[string]interface{}, 4) params["query"] = q.q @@ -639,20 +639,20 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. 
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f = append(f, "params", params) + f = append(f, slog.Any("params", params)) if err != nil { - f = append(f, "error", err) + f = append(f, slog.Any("error", err)) } - f = append(f, "stats", stats.NewQueryStats(q.Stats())) + f = append(f, slog.Any("stats", stats.NewQueryStats(q.Stats()))) if span := trace.SpanFromContext(ctx); span != nil { - f = append(f, "spanID", span.SpanContext().SpanID()) + f = append(f, slog.Any("spanID", span.SpanContext().SpanID())) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - f = append(f, k, v) + f = append(f, slog.Any(k, v)) } } - logger.Info("promql query logged", f...) + logger.LogAttrs(context.Background(), slog.LevelInfo, "promql query logged", f...) // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? // ng.metrics.queryLogFailures.Inc() // ng.logger.Error("can't log query", "err", err) From 53eb1fe71f4c22c6687200c2faa66bfc6c37e0ac Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Mon, 2 Dec 2024 09:21:56 -0500 Subject: [PATCH 1001/1397] test: make promql engine_test use files to exercise query logger Signed-off-by: TJ Hoplock --- promql/engine_test.go | 139 ++++++++++++++++++------------------------ 1 file changed, 59 insertions(+), 80 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index b8e7cd71c3..f0a76ecc4d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,9 +17,9 @@ import ( "context" "errors" "fmt" - "io" - "log/slog" "math" + "os" + "path/filepath" "sort" "strings" "sync" @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/testutil" ) @@ -2148,65 +2149,17 @@ 
func TestSubquerySelector(t *testing.T) { } } -var _ io.Closer = (*FakeQueryLogger)(nil) +func getLogLines(t *testing.T, name string) []string { + content, err := os.ReadFile(name) + require.NoError(t, err) -var _ slog.Handler = (*FakeQueryLogger)(nil) - -type FakeQueryLogger struct { - closed bool - logs []any - attrs []any -} - -func NewFakeQueryLogger() *FakeQueryLogger { - return &FakeQueryLogger{ - closed: false, - logs: make([]interface{}, 0), - attrs: make([]any, 0), + lines := strings.Split(string(content), "\n") + for i := len(lines) - 1; i >= 0; i-- { + if lines[i] == "" { + lines = append(lines[:i], lines[i+1:]...) + } } -} - -func (f *FakeQueryLogger) Close() error { - f.closed = true - return nil -} - -func (f *FakeQueryLogger) Enabled(ctx context.Context, level slog.Level) bool { - return true -} - -func (f *FakeQueryLogger) Handle(ctx context.Context, r slog.Record) error { - // Test usage only really cares about existence of keyvals passed in - // via args, just append in the log message before handling the - // provided args and any embedded kvs added via `.With()` on f.attrs. - log := append([]any{r.Message}, f.attrs...) - f.attrs = f.attrs[:0] - - r.Attrs(func(a slog.Attr) bool { - log = append(log, a.Key, a.Value.Any()) - return true - }) - - f.logs = append(f.logs, log...) - return nil -} - -func (f *FakeQueryLogger) WithAttrs(attrs []slog.Attr) slog.Handler { - attrPairs := []any{} - for _, a := range attrs { - attrPairs = append(attrPairs, a.Key, a.Value.Any()) - } - - return &FakeQueryLogger{ - closed: f.closed, - logs: f.logs, - attrs: append(f.attrs, attrPairs...), - } -} - -func (f *FakeQueryLogger) WithGroup(name string) slog.Handler { - // We don't need to support groups for this fake handler in testing. - return f + return lines } func TestQueryLogger_basic(t *testing.T) { @@ -2231,32 +2184,45 @@ func TestQueryLogger_basic(t *testing.T) { // promql.Query works without query log initialized. 
queryExec() - f1 := NewFakeQueryLogger() + tmpDir := t.TempDir() + ql1File := filepath.Join(tmpDir, "query1.log") + f1, err := logging.NewJSONFileLogger(ql1File) + require.NoError(t, err) + engine.SetQueryLogger(f1) queryExec() - require.Contains(t, f1.logs, `params`) - require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"}) + logLines := getLogLines(t, ql1File) + require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"}) + require.Len(t, logLines, 1) - l := len(f1.logs) + l := len(logLines) queryExec() - require.Len(t, f1.logs, 2*l) + logLines = getLogLines(t, ql1File) + l2 := len(logLines) + require.Equal(t, l2, 2*l) - // Test that we close the query logger when unsetting it. - require.False(t, f1.closed, "expected f1 to be open, got closed") + // Test that we close the query logger when unsetting it. The following + // attempt to close the file should error. engine.SetQueryLogger(nil) - require.True(t, f1.closed, "expected f1 to be closed, got open") + err = f1.Close() + require.ErrorContains(t, err, "file already closed", "expected f1 to be closed, got open") queryExec() // Test that we close the query logger when swapping. 
- f2 := NewFakeQueryLogger() - f3 := NewFakeQueryLogger() + ql2File := filepath.Join(tmpDir, "query2.log") + f2, err := logging.NewJSONFileLogger(ql2File) + require.NoError(t, err) + ql3File := filepath.Join(tmpDir, "query3.log") + f3, err := logging.NewJSONFileLogger(ql3File) + require.NoError(t, err) engine.SetQueryLogger(f2) - require.False(t, f2.closed, "expected f2 to be open, got closed") queryExec() engine.SetQueryLogger(f3) - require.True(t, f2.closed, "expected f2 to be closed, got open") - require.False(t, f3.closed, "expected f3 to be open, got closed") + err = f2.Close() + require.ErrorContains(t, err, "file already closed", "expected f2 to be closed, got open") queryExec() + err = f3.Close() + require.NoError(t, err) } func TestQueryLogger_fields(t *testing.T) { @@ -2268,7 +2234,14 @@ func TestQueryLogger_fields(t *testing.T) { } engine := promqltest.NewTestEngineWithOpts(t, opts) - f1 := NewFakeQueryLogger() + tmpDir := t.TempDir() + ql1File := filepath.Join(tmpDir, "query1.log") + f1, err := logging.NewJSONFileLogger(ql1File) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, f1.Close()) + }) + engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -2281,8 +2254,8 @@ func TestQueryLogger_fields(t *testing.T) { res := query.Exec(ctx) require.NoError(t, res.Err) - require.Contains(t, f1.logs, `foo`) - require.Contains(t, f1.logs, `bar`) + logLines := getLogLines(t, ql1File) + require.Contains(t, logLines[0], "foo", "bar") } func TestQueryLogger_error(t *testing.T) { @@ -2294,7 +2267,14 @@ func TestQueryLogger_error(t *testing.T) { } engine := promqltest.NewTestEngineWithOpts(t, opts) - f1 := NewFakeQueryLogger() + tmpDir := t.TempDir() + ql1File := filepath.Join(tmpDir, "query1.log") + f1, err := logging.NewJSONFileLogger(ql1File) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, f1.Close()) + }) + engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) @@ 
-2308,10 +2288,9 @@ func TestQueryLogger_error(t *testing.T) { res := query.Exec(ctx) require.Error(t, res.Err, "query should have failed") - require.Contains(t, f1.logs, `params`) - require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"}) - require.Contains(t, f1.logs, `error`) - require.Contains(t, f1.logs, testErr) + logLines := getLogLines(t, ql1File) + require.Contains(t, logLines[0], "error", testErr) + require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"}) } func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { From 56f45a2e25bcc3f6421b4393830f25e2ffb6a0ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:34:00 +0100 Subject: [PATCH 1002/1397] chore(deps): bump github.com/linode/linodego from 1.42.0 to 1.43.0 (#15496) Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.42.0 to 1.43.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 48 ++++++++---------------------------------------- 2 files changed, 12 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 5f45dd2327..19525c4ff9 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.42.0 + github.com/linode/linodego v1.43.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -75,9 +75,9 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 golang.org/x/sys v0.26.0 - golang.org/x/text v0.19.0 + golang.org/x/text v0.20.0 golang.org/x/tools v0.26.0 google.golang.org/api v0.204.0 google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 @@ -127,7 +127,7 @@ require ( github.com/go-openapi/spec v0.20.14 // indirect github.com/go-openapi/swag v0.22.9 // indirect github.com/go-openapi/validate v0.23.0 // indirect - github.com/go-resty/resty/v2 v2.13.1 // indirect + github.com/go-resty/resty/v2 v2.15.3 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/glog v1.2.2 // indirect diff --git a/go.sum b/go.sum index 7b4c18f782..973f974905 100644 --- a/go.sum +++ b/go.sum @@ -160,8 +160,8 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= 
-github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= -github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= +github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8= +github.com/go-resty/resty/v2 v2.15.3/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -329,8 +329,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.42.0 h1:ZSbi4MtvwrfB9Y6bknesorvvueBGGilcmh2D5dq76RM= -github.com/linode/linodego v1.42.0/go.mod h1:2yzmY6pegPBDgx2HDllmt0eIk2IlzqcgK6NR0wFCFRY= +github.com/linode/linodego v1.43.0 h1:sGeBB3caZt7vKBoPS5p4AVzmlG4JoqQOdigIibx3egk= +github.com/linode/linodego v1.43.0/go.mod h1:n4TMFu1UVNala+icHqrTEFFaicYSF74cSAUG5zkTwfA= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -498,7 +498,6 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark 
v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= @@ -540,10 +539,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -554,8 +550,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.21.0 
h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -575,11 +569,6 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -593,10 +582,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 
h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -622,39 +609,22 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod 
h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -667,8 +637,6 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 1b184ac1428d622fb093b4b1b74a654053a0f223 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:34:20 +0100 Subject: [PATCH 1003/1397] chore(deps): bump github.com/hetznercloud/hcloud-go/v2 (#15498) Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.15.0 to 2.17.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.15.0...v2.17.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 19525c4ff9..ff4674e904 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.30.0 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 - github.com/hetznercloud/hcloud-go/v2 v2.15.0 + github.com/hetznercloud/hcloud-go/v2 v2.17.0 github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 diff --git a/go.sum b/go.sum index 973f974905..1bf763516b 100644 --- a/go.sum +++ b/go.sum @@ -287,8 +287,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4= -github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg= +github.com/hetznercloud/hcloud-go/v2 v2.17.0 h1:ge0w2piey9SV6XGyU/wQ6HBR24QyMbJ3wLzezplqR68= +github.com/hetznercloud/hcloud-go/v2 v2.17.0/go.mod h1:zfyZ4Orx+mPpYDzWAxXR7DHGL50nnlZ5Edzgs1o6f/s= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= From 071955df6d679a351d409dff64c12c375714e9a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:35:32 +0100 
Subject: [PATCH 1004/1397] chore(deps): bump actions/setup-go from 5.0.2 to 5.1.0 (#15511) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.1.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32...41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1019ea4885..329666e46d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -80,7 +80,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: 1.23.x - run: | @@ -171,7 +171,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: cache: false go-version: 1.23.x @@ -184,7 +184,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies From da1de613fda21b1a860bcb5a09ad5a181a510a11 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:36:57 +0100 Subject: [PATCH 1005/1397] chore(deps): bump actions/cache from 4.0.2 to 4.1.2 (#15509) Bumps [actions/cache](https://github.com/actions/cache) from 4.0.2 to 4.1.2. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0c45773b623bea8c8e75f6c82b208c3cf94ea4f9...6849a6489940f00c2f30c0fb92c6274307ccb58a) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 329666e46d..f7703d940d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -247,7 +247,7 @@ jobs: with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" - - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} From 83b4723ac23362217731ff57bb2788b85ec83910 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:37:50 +0100 Subject: [PATCH 1006/1397] chore(deps): bump bufbuild/buf-setup-action from 1.43.0 to 1.47.2 (#15508) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.43.0 to 1.47.2. 
- [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/62ee92603c244ad0da98bab36a834a999a5329e6...9672cee01808979ea1249f81d6d321217b9a10f6) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 463a725b7a..d03f190769 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 + - uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index ce2014ecff..bf8ae3f6a4 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0 + - uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From 405f65a48a7a3d24b3bd1e4dc779d81bd915a9c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:39:38 +0100 Subject: [PATCH 1007/1397] chore(deps): bump github.com/digitalocean/godo from 1.128.0 to 1.131.0 (#15502) Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.128.0 to 1.131.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.128.0...v1.131.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff4674e904..5dfd1e0af9 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.128.0 + github.com/digitalocean/godo v1.131.0 github.com/docker/docker v27.3.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane v0.13.1 diff --git a/go.sum b/go.sum index 1bf763516b..a94c163efa 100644 --- a/go.sum +++ b/go.sum @@ -91,8 +91,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.128.0 h1:cGn/ibMSRZ9+8etbzMv2MnnCEPTTGlEnx3HHTPwdk1U= -github.com/digitalocean/godo v1.128.0/go.mod 
h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.131.0 h1:0WHymufAV5avpodT0h5/pucUVfO4v7biquOIqhLeROY= +github.com/digitalocean/godo v1.131.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= From 09899ad56d42cea999e6cd985b0d729d28501f38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:41:22 +0100 Subject: [PATCH 1008/1397] chore(deps): bump google.golang.org/protobuf from 1.35.1 to 1.35.2 (#15499) Bumps google.golang.org/protobuf from 1.35.1 to 1.35.2. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5dfd1e0af9..2b87016303 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( google.golang.org/api v0.204.0 google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.2 diff --git a/go.sum b/go.sum index a94c163efa..7fe4023e00 100644 --- a/go.sum +++ b/go.sum @@ -672,8 +672,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 7e3c43450fe1d56f7b06e832ca42718dbe5f3d72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:42:41 +0100 Subject: [PATCH 1009/1397] chore(deps): bump github.com/stretchr/testify from 1.9.0 to 1.10.0 (#15495) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.9.0 to 1.10.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.9.0...v1.10.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 2b87016303..6b15ba1d90 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( github.com/prometheus/sigv4 v0.1.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/collector/pdata v1.18.0 go.opentelemetry.io/collector/semconv v0.112.0 diff --git a/go.sum b/go.sum index 7fe4023e00..f45ab62df7 100644 --- a/go.sum +++ b/go.sum @@ -487,8 +487,9 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= From 10cd744bb36883cdc0a72680262e019c2226117c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:43:36 +0100 Subject: [PATCH 1010/1397] chore(deps): bump 
github.com/stretchr/testify (#15491) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.9.0 to 1.10.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.9.0...v1.10.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index b6a56f9ef1..94a6833717 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -10,7 +10,7 @@ require ( github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.60.1 github.com/prometheus/prometheus v0.53.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 ) require ( diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 7af1984c6f..712d981285 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -293,8 +293,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify 
v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= From 0eb580525e8508f9799a4a037e50abe633c692a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:54:44 +0100 Subject: [PATCH 1011/1397] chore(deps): bump github.com/influxdata/influxdb (#15490) Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.7 to 1.11.8. - [Release notes](https://github.com/influxdata/influxdb/releases) - [Commits](https://github.com/influxdata/influxdb/compare/v1.11.7...v1.11.8) --- updated-dependencies: - dependency-name: github.com/influxdata/influxdb dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 94a6833717..298236a8d5 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -6,7 +6,7 @@ require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.7 + github.com/influxdata/influxdb v1.11.8 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.60.1 github.com/prometheus/prometheus v0.53.1 diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 712d981285..bd11b9d04e 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw= -github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI= +github.com/influxdata/influxdb v1.11.8 h1:lX8MJDfk91O7nqzzonQkjk87gOeQy9V/Xp3gpELhG1s= +github.com/influxdata/influxdb v1.11.8/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod 
h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= From 5352e48dddffdb995572503c32f739ba9e885180 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:00:09 +0000 Subject: [PATCH 1012/1397] chore(deps): bump github/codeql-action from 3.26.10 to 3.27.5 (#15510) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.10 to 3.27.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/e2b3eafc8d227b0241d48be5f425d47c2d750a13...f09c1c0a94de965c15400f5634aa42fac8fb8f88) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9002f4c8ef..c30d907326 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 + uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 + uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 - name: Perform CodeQL Analysis - uses: 
github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10 + uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 4586f4617e..f637e921e5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10 + uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # tag=v3.27.5 with: sarif_file: results.sarif From 5e6a9f012e9b28439b61c44ad401913ec9812c2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:55:16 +0000 Subject: [PATCH 1013/1397] chore(deps): bump golang.org/x/tools from 0.26.0 to 0.27.0 (#15494) Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.26.0 to 0.27.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.26.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 6b15ba1d90..fb4e386320 100644 --- a/go.mod +++ b/go.mod @@ -76,9 +76,9 @@ require ( go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 - golang.org/x/sys v0.26.0 + golang.org/x/sys v0.27.0 golang.org/x/text v0.20.0 - golang.org/x/tools v0.26.0 + golang.org/x/tools v0.27.0 google.golang.org/api v0.204.0 google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 google.golang.org/grpc v1.67.1 @@ -187,11 +187,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.29.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/term v0.25.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/term v0.26.0 // indirect golang.org/x/time v0.7.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/go.sum b/go.sum index f45ab62df7..8abe02d7a8 100644 --- a/go.sum +++ b/go.sum @@ -541,8 +541,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.28.0 
h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= @@ -551,8 +551,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -570,8 +570,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= @@ -615,11 +615,11 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ 
-638,8 +638,8 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 57fab287ab75b67755cb13e0d4d65fe98ed6bd4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 12:57:54 +0000 Subject: [PATCH 1014/1397] chore(deps): bump golang.org/x/sys from 0.26.0 to 0.27.0 (#15503) Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.26.0 to 0.27.0. - [Commits](https://github.com/golang/sys/compare/v0.26.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> From 00146b69fc9486413332a230dbed407127952807 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:04:24 +0000 Subject: [PATCH 1015/1397] chore(deps): bump the k8s-io group with 3 updates (#15492) Bumps the k8s-io group with 3 updates: [k8s.io/api](https://github.com/kubernetes/api), [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) and [k8s.io/client-go](https://github.com/kubernetes/client-go). Updates `k8s.io/api` from 0.31.2 to 0.31.3 - [Commits](https://github.com/kubernetes/api/compare/v0.31.2...v0.31.3) Updates `k8s.io/apimachinery` from 0.31.2 to 0.31.3 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.31.2...v0.31.3) Updates `k8s.io/client-go` from 0.31.2 to 0.31.3 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.31.2...v0.31.3) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: k8s-io ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index fb4e386320..ca76db8249 100644 --- a/go.mod +++ b/go.mod @@ -85,9 +85,9 @@ require ( google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.2 - k8s.io/apimachinery v0.31.2 - k8s.io/client-go v0.31.2 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.130.1 ) diff --git a/go.sum b/go.sum index 8abe02d7a8..ccf1d9e376 100644 --- a/go.sum +++ b/go.sum @@ -702,12 +702,12 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= -k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= -k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= -k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= -k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod 
h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= From 60467699411fa6e15e6f1b8e0eb91f5cba6933d2 Mon Sep 17 00:00:00 2001 From: Antoine Pultier <45740+fungiboletus@users.noreply.github.com> Date: Tue, 3 Dec 2024 14:24:50 +0100 Subject: [PATCH 1016/1397] tsdb documenation: Improve Chunk documentation Signed-off-by: Antoine Pultier <45740+fungiboletus@users.noreply.github.com> Signed-off-by: Antoine Pultier <45740+fungiboletus@users.noreply.github.com> --- tsdb/docs/format/chunks.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tsdb/docs/format/chunks.md b/tsdb/docs/format/chunks.md index 8a35721358..d4f78dfd3d 100644 --- a/tsdb/docs/format/chunks.md +++ b/tsdb/docs/format/chunks.md @@ -29,16 +29,16 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes). # Chunk ``` -┌───────────────┬───────────────────┬──────────────┬─────────────────┐ -│ len │ encoding <1 byte> │ data │ CRC32C <4 byte> │ -└───────────────┴───────────────────┴──────────────┴─────────────────┘ +┌───────────────┬───────────────────┬──────────────┬───────────────────┐ +│ len │ encoding <1 byte> │ data │ checksum <4 byte> │ +└───────────────┴───────────────────┴──────────────┴───────────────────┘ ``` Notes: -* `` has 1 to 10 bytes, from [`encoding/binary`](https://go.dev/src/encoding/binary/varint.go). +* `len`: Chunk size in bytes. Encoded using 1 to 10 bytes, using the [`` encoding](https://go.dev/src/encoding/binary/varint.go). * `encoding`: Currently either `XOR` (1), `histogram` (2), or `float histogram` (3). * `data`: See below for each encoding. -* `CRC32C`: CRC32 Castagnoli of `encoding` and `data`, serialised as an unsigned 32 bits big endian. +* `checksum`: Checksum of `encoding` and `data`. 
It's a [cyclic redudancy check](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) with the Castagnoli polynomial, serialised as an unsigned 32 bits big endian number. Can be refered as a `CRC-32C`. ## XOR chunk data From cb315b83c68fb152602fc677c592db9dc39b8272 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:28:15 +0000 Subject: [PATCH 1017/1397] chore(deps): bump the go-opentelemetry-io group with 10 updates (#15493) Bumps the go-opentelemetry-io group with 10 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) | `1.18.0` | `1.20.0` | | [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector) | `0.112.0` | `0.114.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.56.0` | `0.57.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.56.0` | `0.57.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.32.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.32.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.32.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.32.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.32.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.31.0` | `1.32.0` | Updates `go.opentelemetry.io/collector/pdata` from 1.18.0 to 1.20.0 - [Release 
notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.18.0...pdata/v1.20.0) Updates `go.opentelemetry.io/collector/semconv` from 0.112.0 to 0.114.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.112.0...v0.114.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace` from 0.56.0 to 0.57.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.56.0...zpages/v0.57.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.56.0 to 0.57.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.56.0...zpages/v0.57.0) Updates `go.opentelemetry.io/otel` from 1.31.0 to 1.32.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.32.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.31.0 to 1.32.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - 
[Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.32.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.31.0 to 1.32.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.32.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.31.0 to 1.32.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.32.0) Updates `go.opentelemetry.io/otel/sdk` from 1.31.0 to 1.32.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.32.0) Updates `go.opentelemetry.io/otel/trace` from 1.31.0 to 1.32.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...v1.32.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace 
dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 28 ++++++++++++++-------------- go.sum | 56 ++++++++++++++++++++++++++++---------------------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/go.mod b/go.mod index ca76db8249..b022fe28e1 100644 --- a/go.mod +++ b/go.mod @@ -60,16 +60,16 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.18.0 - go.opentelemetry.io/collector/semconv v0.112.0 - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/collector/pdata v1.20.0 + go.opentelemetry.io/collector/semconv v0.114.0 + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 @@ -80,7 +80,7 @@ require ( golang.org/x/text v0.20.0 golang.org/x/tools v0.27.0 google.golang.org/api v0.204.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 + 
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v2 v2.4.0 @@ -140,7 +140,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -185,7 +185,7 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.29.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect @@ -193,7 +193,7 @@ require ( golang.org/x/net v0.31.0 // indirect golang.org/x/term v0.26.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index ccf1d9e376..c1247bafe9 100644 --- a/go.sum +++ b/go.sum @@ -235,8 +235,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= @@ -503,28 +503,28 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg= -go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= -go.opentelemetry.io/collector/semconv v0.112.0 h1:JPQyvZhlNLVSuVI+FScONaiFygB7h7NTZceUEKIQUEc= -go.opentelemetry.io/collector/semconv v0.112.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= 
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/collector/pdata v1.20.0 h1:ePcwt4bdtISP0loHaE+C9xYoU2ZkIvWv89Fob16o9SM= +go.opentelemetry.io/collector/pdata v1.20.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= +go.opentelemetry.io/collector/semconv v0.114.0 h1:/eKcCJwZepQUtEuFuxa0thx2XIOvhFpaf214ZG1a11k= +go.opentelemetry.io/collector/semconv v0.114.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 h1:7F3XCD6WYzDkwbi8I8N+oYJWquPVScnRosKGgqjsR8c= 
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0/go.mod h1:Dk3C0BfIlZDZ5c6eVS7TYiH2vssuyUU3vUsgbrR+5V4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod 
h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -652,10 +652,10 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 4aeed2c4b1684f6e696defbfc975156fcb7f1bf6 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Tue, 3 Dec 2024 19:01:05 +0530 
Subject: [PATCH 1018/1397] fix resets function for histograms Signed-off-by: Neeraj Gartia --- promql/functions.go | 76 ++++++++++++----------- promql/promqltest/testdata/functions.test | 14 ++++- 2 files changed, 53 insertions(+), 37 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 1c2d1ca823..016e676d31 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1400,27 +1400,41 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms resets := 0 - - if len(floats) > 1 { - prev := floats[0].F - for _, sample := range floats[1:] { - current := sample.F - if current < prev { - resets++ - } - prev = current - } + if len(floats) == 0 && len(histograms) == 0 { + return enh.Out, nil } - if len(histograms) > 1 { - prev := histograms[0].H - for _, sample := range histograms[1:] { - current := sample.H - if current.DetectReset(prev) { + var prevSample, curSample Sample + for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); { + switch { + // Process a float sample if no histogram sample remains or its timestamp is earlier. + // Process a histogram sample if no float sample remains or its timestamp is earlier. + case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T: + curSample.F = floats[iFloat].F + curSample.H = nil + iFloat++ + case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T: + curSample.H = histograms[iHistogram].H + iHistogram++ + } + // Skip the comparison for the first sample, just initialize prevSample. 
+ if iFloat+iHistogram == 1 { + prevSample = curSample + continue + } + switch { + case prevSample.H == nil && curSample.H == nil: + if curSample.F < prevSample.F { + resets++ + } + case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil: + resets++ + case prevSample.H != nil && curSample.H != nil: + if curSample.H.DetectReset(prevSample.H) { resets++ } - prev = current } + prevSample = curSample } return append(enh.Out, Sample{F: float64(resets)}), nil @@ -1441,16 +1455,12 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp // Process a float sample if no histogram sample remains or its timestamp is earlier. // Process a histogram sample if no float sample remains or its timestamp is earlier. case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T: - { - curSample.F = floats[iFloat].F - curSample.H = nil - iFloat++ - } + curSample.F = floats[iFloat].F + curSample.H = nil + iFloat++ case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T: - { - curSample.H = histograms[iHistogram].H - iHistogram++ - } + curSample.H = histograms[iHistogram].H + iHistogram++ } // Skip the comparison for the first sample, just initialize prevSample. 
if iFloat+iHistogram == 1 { @@ -1459,20 +1469,14 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp } switch { case prevSample.H == nil && curSample.H == nil: - { - if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) { - changes++ - } - } - case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil: - { + if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) { changes++ } + case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil: + changes++ case prevSample.H != nil && curSample.H != nil: - { - if !curSample.H.Equals(prevSample.H) { - changes++ - } + if !curSample.H.Equals(prevSample.H) { + changes++ } } prevSample = curSample diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index a44a9fc67e..eaf3eed2b2 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -5,7 +5,7 @@ load 5m http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1 http_requests_histogram{path="/foo"} {{schema:0 sum:1 count:1}}x9 http_requests_histogram{path="/bar"} 0 0 0 0 0 0 0 0 {{schema:0 sum:1 count:1}} {{schema:0 sum:1 count:1}} - http_requests_histogram{path="/biz"} 0 1 0 2 0 3 0 {{schema:0 sum:1 count:1}} {{schema:0 sum:2 count:2}} {{schema:0 sum:1 count:1}} + http_requests_histogram{path="/biz"} 0 1 0 2 0 3 0 {{schema:0 sum:1 count:1 z_bucket_w:0.001 z_bucket:2}} {{schema:0 sum:2 count:2 z_bucket_w:0.001 z_bucket:1}} {{schema:0 sum:1 count:1 z_bucket_w:0.001 z_bucket:2}} # Tests for resets(). eval instant at 50m resets(http_requests[5m]) @@ -42,6 +42,18 @@ eval instant at 50m resets(http_requests[50m]) eval instant at 50m resets(nonexistent_metric[50m]) +# Test for mix of floats and histograms. 
+ +eval instant at 50m resets(http_requests_histogram[6m]) + {path="/foo"} 0 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m resets(http_requests_histogram[60m]) + {path="/foo"} 0 + {path="/bar"} 1 + {path="/biz"} 6 + # Tests for changes(). eval instant at 50m changes(http_requests[5m]) From f1340bac64f415df757583026476848efdda931f Mon Sep 17 00:00:00 2001 From: Antoine Pultier Date: Tue, 3 Dec 2024 14:36:56 +0100 Subject: [PATCH 1019/1397] documentation: put back trailing punctuation. markdownlint wasn't happy about the trailing punctuation in the headings. Signed-off-by: Antoine Pultier --- tsdb/docs/format/chunks.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tsdb/docs/format/chunks.md b/tsdb/docs/format/chunks.md index 837db020e3..d6e3a3ae6c 100644 --- a/tsdb/docs/format/chunks.md +++ b/tsdb/docs/format/chunks.md @@ -48,7 +48,7 @@ Notes: └──────────────────────┴───────────────┴───────────────┴──────────────────────┴──────────────────────┴──────────────────────┴──────────────────────┴─────┴──────────────────────┴──────────────────────┴──────────────────┘ ``` -### Notes +### Notes: * `ts` is the timestamp, `v` is the value. * `...` means to repeat the previous two fields as needed, with `n` starting at 2 and going up to `num_samples` – 1. 
@@ -73,7 +73,7 @@ Notes: └──────────────────────┴──────────────────────────┴───────────────────────────────┴─────────────────────┴──────────────────┴──────────────────┴──────────────────────┴────────────────┴──────────────────┘ ``` -### Positive and negative spans data +### Positive and negative spans data: ``` ┌─────────────────────────┬────────────────────────┬───────────────────────┬────────────────────────┬───────────────────────┬─────┬────────────────────────┬───────────────────────┐ @@ -81,7 +81,7 @@ Notes: └─────────────────────────┴────────────────────────┴───────────────────────┴────────────────────────┴───────────────────────┴─────┴────────────────────────┴───────────────────────┘ ``` -### Custom values data +### Custom values data: The `custom_values` data is currently only used for schema -53 (custom bucket boundaries). For other schemas, it is empty (length of zero). @@ -91,7 +91,7 @@ The `custom_values` data is currently only used for schema -53 (custom bucket bo └──────────────────────────┴─────────────────────────────────────┴─────┴──────────────────┘ ``` -### Samples data +### Samples data: ``` ┌──────────────────────────┐ @@ -107,7 +107,7 @@ The `custom_values` data is currently only used for schema -53 (custom bucket bo └──────────────────────────┘ ``` -#### Sample 0 data +#### Sample 0 data: ``` ┌─────────────────┬─────────────────────┬──────────────────────────┬───────────────┬───────────────────────────┬─────┬───────────────────────────┬───────────────────────────┬─────┬───────────────────────────┐ @@ -115,7 +115,7 @@ The `custom_values` data is currently only used for schema -53 (custom bucket bo └─────────────────┴─────────────────────┴──────────────────────────┴───────────────┴───────────────────────────┴─────┴───────────────────────────┴───────────────────────────┴─────┴───────────────────────────┘ ``` -#### Sample 1 data +#### Sample 1 data: ``` 
┌───────────────────────┬──────────────────────────┬───────────────────────────────┬──────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┬─────────────────────────────────┬─────┬─────────────────────────────────┐ @@ -123,7 +123,7 @@ The `custom_values` data is currently only used for schema -53 (custom bucket bo └───────────────────────┴──────────────────────────┴───────────────────────────────┴──────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┴─────────────────────────────────┴─────┴─────────────────────────────────┘ ``` -#### Sample 2 data and following +#### Sample 2 data and following: ``` ┌─────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ @@ -131,7 +131,7 @@ The `custom_values` data is currently only used for schema -53 (custom bucket bo └─────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘ ``` -### Notes +### Notes: * `histogram_flags` is a byte of which currently only the first two bits are used: * `10`: Counter reset between the previous chunk and this one. @@ -181,7 +181,7 @@ bound of 33554430 is picked so that the varbit encoded value will take at most Float histograms have the same layout as histograms apart from the encoding of samples. 
-### Samples data +### Samples data: ``` ┌──────────────────────────┐ @@ -197,7 +197,7 @@ Float histograms have the same layout as histograms apart from the encoding of s └──────────────────────────┘ ``` -#### Sample 0 data +#### Sample 0 data: ``` ┌─────────────────┬─────────────────┬──────────────────────┬───────────────┬────────────────────────┬─────┬────────────────────────┬────────────────────────┬─────┬────────────────────────┐ @@ -205,7 +205,7 @@ Float histograms have the same layout as histograms apart from the encoding of s └─────────────────┴─────────────────┴──────────────────────┴───────────────┴────────────────────────┴─────┴────────────────────────┴────────────────────────┴─────┴────────────────────────┘ ``` -#### Sample 1 data +#### Sample 1 data: ``` ┌───────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ @@ -213,7 +213,7 @@ Float histograms have the same layout as histograms apart from the encoding of s └───────────────────────┴────────────────────────┴─────────────────────────────┴──────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┴───────────────────────────────┴─────┴───────────────────────────────┘ ``` -#### Sample 2 data and following +#### Sample 2 data and following: ``` ┌─────────────────────┬────────────────────────┬─────────────────────────────┬──────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┬───────────────────────────────┬─────┬───────────────────────────────┐ From 7f26371af84d6ade4d6c40a8e9eeef32cb51f6e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:37:41 +0000 Subject: [PATCH 1020/1397] chore(deps): bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 (#15506) Bumps 
[golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.23.0 to 0.24.0. - [Commits](https://github.com/golang/oauth2/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b022fe28e1..f86ade0a35 100644 --- a/go.mod +++ b/go.mod @@ -74,7 +74,7 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/oauth2 v0.23.0 + golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.9.0 golang.org/x/sys v0.27.0 golang.org/x/text v0.20.0 diff --git a/go.sum b/go.sum index c1247bafe9..ff0d5e691d 100644 --- a/go.sum +++ b/go.sum @@ -574,8 +574,8 @@ golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 
c9f3d9b47f374fc2e3a1567d5c0b61b196487885 Mon Sep 17 00:00:00 2001 From: machine424 Date: Tue, 3 Dec 2024 19:41:45 +0100 Subject: [PATCH 1021/1397] doc(nomad): adjust sections about nomad_sd_config's server test(nomad): extend TestConfiguredService with more valid/invalid servers configs fixes https://github.com/prometheus/prometheus/issues/12306 Signed-off-by: machine424 --- discovery/nomad/nomad_test.go | 36 +++++++++++++++++++++-------- docs/configuration/configuration.md | 3 ++- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index 32b087524c..c08f017496 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -127,19 +127,37 @@ func (m *SDMock) HandleServiceHashiCupsGet() { } func TestConfiguredService(t *testing.T) { - conf := &SDConfig{ - Server: "http://localhost:4646", + testCases := []struct { + name string + server string + acceptedURL bool + }{ + {"invalid hostname URL", "http://foo.bar:4646", true}, + {"invalid even though accepted by parsing", "foo.bar:4646", true}, + {"valid address URL", "http://172.30.29.23:4646", true}, + {"invalid URL", "172.30.29.23:4646", false}, } - reg := prometheus.NewRegistry() - refreshMetrics := discovery.NewRefreshMetrics(reg) - metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) - require.NoError(t, metrics.Register()) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + conf := &SDConfig{ + Server: tc.server, + } - _, err := NewDiscovery(conf, nil, metrics) - require.NoError(t, err) + reg := prometheus.NewRegistry() + refreshMetrics := discovery.NewRefreshMetrics(reg) + metrics := conf.NewDiscovererMetrics(reg, refreshMetrics) + require.NoError(t, metrics.Register()) + defer metrics.Unregister() - metrics.Unregister() + _, err := NewDiscovery(conf, nil, metrics) + if tc.acceptedURL { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } } func TestNomadSDRefresh(t 
*testing.T) { diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 32da2c61c6..746360633e 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2130,7 +2130,8 @@ The following meta labels are available on targets during [relabeling](#relabel_ [ namespace: | default = default ] [ refresh_interval: | default = 60s ] [ region: | default = global ] -[ server: ] +# The URL to connect to the API. +[ server: ] [ tag_separator: | default = ,] # HTTP client settings, including authentication methods (such as basic auth and From 5d248e2d101652bee7b37b888dcd27007e064758 Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 2 Dec 2024 12:10:04 +0100 Subject: [PATCH 1022/1397] docs: improve section about Prometheus graceful shutdown. fixes https://github.com/prometheus/prometheus/issues/13183 Signed-off-by: machine424 --- docs/getting_started.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index e89ac705ee..82bae9b8d4 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -262,6 +262,9 @@ process ID. ## Shutting down your instance gracefully. While Prometheus does have recovery mechanisms in the case that there is an -abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly -shutdown a Prometheus instance. If you're running on Linux this can be performed -by using `kill -s SIGTERM `, replacing `` with your Prometheus process ID. +abrupt process failure it is recommended to use signals or interrupts for a +clean shutdown of a Prometheus instance. On Linux, this can be done by sending +the `SIGTERM` or `SIGINT` signals to the Prometheus process. For example, you +can use `kill -s `, replacing `` with the signal name +and `` with the Prometheus process ID. Alternatively, you can press the +interrupt character at the controlling terminal, which by default is `^C` (Control-C). 
From 7fd3b13bd2275364a0ea88303f29f0f095392b15 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Wed, 4 Dec 2024 17:41:05 +0530 Subject: [PATCH 1023/1397] [TEST] PromQL: Adds tests with histograms for simple functions (#15488) adds tests with histograms for simple functions Signed-off-by: Neeraj Gartia --------- Signed-off-by: Neeraj Gartia --- promql/promqltest/testdata/functions.test | 26 +++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index eaf3eed2b2..2ed7ffb6a4 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -506,6 +506,7 @@ load 5m test_sgn{src="sgn-d"} -50 test_sgn{src="sgn-e"} 0 test_sgn{src="sgn-f"} 100 + test_sgn{src="sgn-histogram"} {{schema:1 sum:1 count:1}} eval instant at 0m sgn(test_sgn) {src="sgn-a"} -1 @@ -1271,11 +1272,16 @@ clear load 5m exp_root_log{l="x"} 10 exp_root_log{l="y"} 20 + exp_root_log_h{l="z"} {{schema:1 sum:1 count:1}} eval instant at 1m exp(exp_root_log) {l="x"} 22026.465794806718 {l="y"} 485165195.4097903 +eval instant at 1m exp({__name__=~"exp_root_log(_h)?"}) + {l="x"} 22026.465794806718 + {l="y"} 485165195.4097903 + eval instant at 1m exp(exp_root_log - 10) {l="y"} 22026.465794806718 {l="x"} 1 @@ -1288,6 +1294,10 @@ eval instant at 1m ln(exp_root_log) {l="x"} 2.302585092994046 {l="y"} 2.995732273553991 +eval instant at 1m ln({__name__=~"exp_root_log(_h)?"}) + {l="x"} 2.302585092994046 + {l="y"} 2.995732273553991 + eval instant at 1m ln(exp_root_log - 10) {l="y"} 2.302585092994046 {l="x"} -Inf @@ -1300,14 +1310,26 @@ eval instant at 1m exp(ln(exp_root_log)) {l="y"} 20 {l="x"} 10 +eval instant at 1m exp(ln({__name__=~"exp_root_log(_h)?"})) + {l="y"} 20 + {l="x"} 10 + eval instant at 1m sqrt(exp_root_log) {l="x"} 3.1622776601683795 {l="y"} 4.47213595499958 +eval instant at 1m sqrt({__name__=~"exp_root_log(_h)?"}) + 
{l="x"} 3.1622776601683795 + {l="y"} 4.47213595499958 + eval instant at 1m log2(exp_root_log) {l="x"} 3.3219280948873626 {l="y"} 4.321928094887363 +eval instant at 1m log2({__name__=~"exp_root_log(_h)?"}) + {l="x"} 3.3219280948873626 + {l="y"} 4.321928094887363 + eval instant at 1m log2(exp_root_log - 10) {l="y"} 3.3219280948873626 {l="x"} -Inf @@ -1320,6 +1342,10 @@ eval instant at 1m log10(exp_root_log) {l="x"} 1 {l="y"} 1.301029995663981 +eval instant at 1m log10({__name__=~"exp_root_log(_h)?"}) + {l="x"} 1 + {l="y"} 1.301029995663981 + eval instant at 1m log10(exp_root_log - 10) {l="y"} 1 {l="x"} -Inf From a574335d6bd09f301c94a909200fce0ae940db8b Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 4 Dec 2024 16:41:12 +0100 Subject: [PATCH 1024/1397] OTLP receiver: Don't call NormalizeLabel in NoUTF8EscapingWithSuffixes mode Signed-off-by: Arve Knudsen --- .../prometheus/normalize_label.go | 8 +++--- .../prometheus/normalize_label_test.go | 27 +++++++++---------- .../prometheusremotewrite/helper.go | 14 +++++++--- 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index b928e6888d..b51b5e945a 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -29,15 +29,15 @@ import ( // // Labels that start with non-letter rune will be prefixed with "key_". // An exception is made for double-underscores which are allowed. -func NormalizeLabel(label string, allowUTF8 bool) string { - // Trivial case - if len(label) == 0 || allowUTF8 { +func NormalizeLabel(label string) string { + // Trivial case. + if len(label) == 0 { return label } label = strutil.SanitizeLabelName(label) - // If label starts with a number, prepend with "key_" + // If label starts with a number, prepend with "key_". 
if unicode.IsDigit(rune(label[0])) { label = "key_" + label } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") { diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go index 19ab6cd173..77538ce8e6 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label_test.go @@ -22,27 +22,24 @@ import ( func TestNormalizeLabel(t *testing.T) { tests := []struct { - label string - expected string - expectedUTF8 string + label string + expected string }{ - {"", "", ""}, - {"label:with:colons", "label_with_colons", "label:with:colons"}, // Without UTF-8 support, colons are only allowed in metric names - {"LabelWithCapitalLetters", "LabelWithCapitalLetters", "LabelWithCapitalLetters"}, - {"label!with&special$chars)", "label_with_special_chars_", "label!with&special$chars)"}, - {"label_with_foreign_characters_字符", "label_with_foreign_characters___", "label_with_foreign_characters_字符"}, - {"label.with.dots", "label_with_dots", "label.with.dots"}, - {"123label", "key_123label", "123label"}, - {"_label_starting_with_underscore", "key_label_starting_with_underscore", "_label_starting_with_underscore"}, - {"__label_starting_with_2underscores", "__label_starting_with_2underscores", "__label_starting_with_2underscores"}, + {"", ""}, + {"label:with:colons", "label_with_colons"}, + {"LabelWithCapitalLetters", "LabelWithCapitalLetters"}, + {"label!with&special$chars)", "label_with_special_chars_"}, + {"label_with_foreign_characters_字符", "label_with_foreign_characters___"}, + {"label.with.dots", "label_with_dots"}, + {"123label", "key_123label"}, + {"_label_starting_with_underscore", "key_label_starting_with_underscore"}, + {"__label_starting_with_2underscores", "__label_starting_with_2underscores"}, } for i, test := range tests { t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { - result := 
NormalizeLabel(test.label, false) + result := NormalizeLabel(test.label) require.Equal(t, test.expected, result) - uTF8result := NormalizeLabel(test.label, true) - require.Equal(t, test.expectedUTF8, uTF8result) }) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 316dd60c63..62814bfc8d 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -157,7 +157,10 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting // map ensures no duplicate label names. l := make(map[string]string, maxLabelCount) for _, label := range labels { - var finalKey = prometheustranslator.NormalizeLabel(label.Name, settings.AllowUTF8) + finalKey := label.Name + if !settings.AllowUTF8 { + finalKey = prometheustranslator.NormalizeLabel(finalKey) + } if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value } else { @@ -166,7 +169,10 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } for _, lbl := range promotedAttrs { - normalized := prometheustranslator.NormalizeLabel(lbl.Name, settings.AllowUTF8) + normalized := lbl.Name + if !settings.AllowUTF8 { + normalized = prometheustranslator.NormalizeLabel(normalized) + } if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value } @@ -204,8 +210,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting log.Println("label " + name + " is overwritten. 
Check if Prometheus reserved labels are used.") } // internal labels should be maintained - if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { - name = prometheustranslator.NormalizeLabel(name, settings.AllowUTF8) + if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { + name = prometheustranslator.NormalizeLabel(name) } l[name] = extras[i+1] } From 7719ed18d70d8736d7bd6188d1628835b85b50c3 Mon Sep 17 00:00:00 2001 From: machine424 Date: Tue, 3 Dec 2024 17:40:18 +0100 Subject: [PATCH 1025/1397] test: add TestTargetsFromGroupWithLabelKeepDrop to reinforce a relabelling behaviour. fixes https://github.com/prometheus/prometheus/issues/12355 Signed-off-by: machine424 --- scrape/target_test.go | 74 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/scrape/target_test.go b/scrape/target_test.go index bd27952874..0d763d7389 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -354,6 +354,80 @@ func TestTargetsFromGroup(t *testing.T) { require.EqualError(t, failures[0], expectedError) } +// TestTargetsFromGroupWithLabelKeepDrop aims to demonstrate and reinforce the current behavior: relabeling's "labelkeep" and "labeldrop" +// are applied to all labels of a target, including internal ones (labels starting with "__" such as "__address__"). +// This will be helpful for cases like https://github.com/prometheus/prometheus/issues/12355. 
+func TestTargetsFromGroupWithLabelKeepDrop(t *testing.T) { + tests := []struct { + name string + cfgText string + targets []model.LabelSet + shouldDropTarget bool + }{ + { + name: "no relabeling", + cfgText: ` +global: + metric_name_validation_scheme: legacy +scrape_configs: + - job_name: job1 + static_configs: + - targets: ["localhost:9090"] +`, + targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}}, + }, + { + name: "labelkeep", + cfgText: ` +global: + metric_name_validation_scheme: legacy +scrape_configs: + - job_name: job1 + static_configs: + - targets: ["localhost:9090"] + relabel_configs: + - regex: 'foo' + action: labelkeep +`, + targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}}, + shouldDropTarget: true, + }, + { + name: "labeldrop", + cfgText: ` +global: + metric_name_validation_scheme: legacy +scrape_configs: + - job_name: job1 + static_configs: + - targets: ["localhost:9090"] + relabel_configs: + - regex: '__address__' + action: labeldrop +`, + targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}}, + shouldDropTarget: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := loadConfiguration(t, tt.cfgText) + lb := labels.NewBuilder(labels.EmptyLabels()) + targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: tt.targets}, config.ScrapeConfigs[0], nil, lb) + + if tt.shouldDropTarget { + require.Len(t, failures, 1) + require.EqualError(t, failures[0], "instance 0 in group : no address") + require.Empty(t, targets) + } else { + require.Empty(t, failures) + require.Len(t, targets, 1) + } + }) + } +} + func BenchmarkTargetsFromGroup(b *testing.B) { // Simulate Kubernetes service-discovery and use subset of rules from typical Prometheus config. 
cfgText := ` From 8d4bcd2c773c159ae6e87c9bc7b9517bfb92835d Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Tue, 3 Dec 2024 12:33:10 -0500 Subject: [PATCH 1026/1397] promql: Fix various UTF-8 bugs related to quoting Fixes UTF-8 aggregator label list items getting mutated with quote marks when String-ified. Fixes quoted metric names not supported in metric declarations. Fixes UTF-8 label names not being quoted when String-ified. Fixes https://github.com/prometheus/prometheus/issues/15470 Fixes https://github.com/prometheus/prometheus/issues/15528 Signed-off-by: Owen Williams Co-authored-by: Bryan Boreham --- model/labels/labels_common.go | 6 +- model/labels/labels_test.go | 4 + promql/engine_test.go | 2 +- promql/parser/generated_parser.y | 4 +- promql/parser/generated_parser.y.go | 333 ++++++++++++++-------------- promql/parser/parse.go | 3 +- promql/parser/parse_test.go | 80 ++++++- promql/parser/printer.go | 17 +- promql/promqltest/test.go | 4 + promql/promqltest/test_test.go | 28 ++- storage/remote/codec_test.go | 5 + 11 files changed, 295 insertions(+), 191 deletions(-) diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 99529a3836..a232eeea5d 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -51,7 +51,11 @@ func (ls Labels) String() string { b.WriteByte(',') b.WriteByte(' ') } - b.WriteString(l.Name) + if !model.LabelName(l.Name).IsValidLegacy() { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name)) + } else { + b.WriteString(l.Name) + } b.WriteByte('=') b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) i++ diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 3a4040776c..06802d6f3e 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -39,6 +39,10 @@ func TestLabels_String(t *testing.T) { labels: Labels{}, expected: "{}", }, + { + labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"), + expected: `{"service.name"="t1", 
"whatever\\whatever"="t2"}`, + }, } for _, c := range cases { str := c.labels.String() diff --git a/promql/engine_test.go b/promql/engine_test.go index 8b8dc3909c..20d78a7d86 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -751,7 +751,7 @@ load 10s Interval: 5 * time.Second, }, { - Query: `count_values("wrong label!", metric)`, + Query: `count_values("wrong label!\xff", metric)`, ShouldError: true, }, } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index c321a1e973..3865dc6548 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -669,14 +669,14 @@ label_set_item : IDENTIFIER EQL STRING { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } } | string_identifier EQL STRING { $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } } + | string_identifier + { $$ = labels.Label{Name: labels.MetricName, Value: $1.Val} } | IDENTIFIER EQL error { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}} | string_identifier EQL error { yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}} | IDENTIFIER error { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}} - | string_identifier error - { yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}} | error { yylex.(*parser).unexpected("label set", "identifier or \"}\""); $$ = labels.Label{} } ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 8979410ceb..7ff8591169 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -339,7 +339,7 @@ var yyExca = [...]int16{ 79, 197, 85, 197, -2, 125, - -1, 205, + -1, 204, 9, 246, 12, 246, 13, 246, @@ -371,7 +371,7 @@ var yyExca = [...]int16{ 88, 246, 89, 246, -2, 0, - -1, 206, + -1, 205, 9, 246, 12, 246, 13, 246, @@ -407,139 +407,139 @@ var yyExca = [...]int16{ const yyPrivate = 57344 -const yyLast = 804 
+const yyLast = 803 var yyAct = [...]int16{ - 155, 339, 337, 158, 344, 231, 39, 197, 281, 44, - 296, 295, 84, 120, 82, 181, 109, 108, 351, 352, - 353, 354, 107, 111, 203, 136, 204, 159, 154, 112, - 205, 206, 234, 6, 271, 55, 163, 163, 107, 334, - 333, 307, 244, 275, 309, 54, 162, 162, 250, 363, - 91, 272, 330, 131, 362, 233, 60, 270, 276, 110, - 100, 101, 298, 115, 103, 116, 106, 90, 164, 164, - 114, 265, 113, 361, 277, 307, 360, 246, 247, 338, - 103, 248, 106, 153, 165, 165, 264, 316, 201, 261, - 122, 105, 235, 237, 239, 240, 241, 249, 251, 254, - 255, 256, 257, 258, 262, 263, 273, 105, 236, 238, - 242, 243, 245, 252, 253, 152, 117, 166, 259, 260, - 176, 164, 170, 173, 163, 168, 223, 169, 172, 2, - 3, 4, 5, 107, 162, 199, 111, 165, 187, 202, - 189, 171, 112, 269, 207, 208, 209, 210, 211, 212, - 213, 214, 215, 216, 217, 218, 219, 220, 221, 200, - 89, 91, 113, 222, 123, 193, 268, 329, 224, 225, - 183, 100, 101, 191, 121, 103, 104, 106, 90, 7, - 85, 234, 266, 182, 55, 183, 328, 86, 192, 123, - 83, 244, 122, 267, 54, 132, 190, 250, 188, 121, - 345, 230, 105, 86, 233, 77, 35, 119, 304, 10, - 185, 327, 86, 303, 293, 294, 157, 315, 297, 79, - 184, 186, 326, 163, 274, 185, 246, 247, 302, 325, - 248, 324, 314, 162, 323, 184, 186, 299, 261, 313, - 322, 235, 237, 239, 240, 241, 249, 251, 254, 255, - 256, 257, 258, 262, 263, 164, 321, 236, 238, 242, - 243, 245, 252, 253, 180, 126, 320, 259, 260, 179, - 125, 165, 305, 319, 306, 308, 318, 310, 317, 130, - 88, 129, 178, 124, 311, 312, 137, 138, 139, 140, + 154, 338, 336, 157, 343, 230, 39, 196, 280, 44, + 295, 294, 84, 120, 82, 233, 180, 109, 108, 350, + 351, 352, 353, 110, 111, 243, 202, 158, 203, 135, + 112, 249, 361, 6, 333, 329, 113, 332, 232, 204, + 205, 308, 271, 60, 130, 270, 297, 268, 162, 315, + 156, 360, 153, 306, 359, 344, 200, 162, 161, 55, + 245, 246, 222, 115, 247, 116, 107, 161, 269, 54, + 267, 114, 260, 306, 182, 234, 236, 238, 239, 240, + 248, 250, 253, 254, 255, 256, 257, 261, 262, 163, + 122, 235, 237, 241, 
242, 244, 251, 252, 192, 328, + 111, 258, 259, 117, 190, 164, 112, 152, 103, 55, + 106, 337, 77, 113, 184, 151, 35, 165, 327, 54, + 175, 191, 169, 172, 183, 185, 167, 189, 168, 2, + 3, 4, 5, 107, 198, 105, 159, 160, 201, 186, + 188, 7, 326, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 199, 194, + 89, 91, 221, 162, 264, 325, 197, 223, 224, 171, + 200, 100, 101, 161, 162, 103, 104, 106, 90, 263, + 233, 324, 170, 162, 161, 323, 362, 322, 321, 274, + 243, 122, 266, 161, 131, 163, 249, 272, 123, 320, + 229, 319, 105, 232, 275, 318, 163, 317, 121, 85, + 316, 164, 163, 292, 293, 163, 265, 296, 129, 83, + 276, 86, 164, 273, 10, 245, 246, 187, 164, 247, + 88, 164, 86, 50, 79, 36, 298, 260, 1, 78, + 234, 236, 238, 239, 240, 248, 250, 253, 254, 255, + 256, 257, 261, 262, 123, 49, 235, 237, 241, 242, + 244, 251, 252, 181, 121, 182, 258, 259, 128, 48, + 127, 304, 119, 305, 307, 59, 309, 86, 9, 9, + 47, 46, 134, 310, 311, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, - 151, 195, 160, 161, 50, 163, 36, 167, 198, 331, - 78, 332, 201, 228, 55, 162, 85, 227, 1, 340, - 341, 342, 336, 49, 54, 343, 83, 347, 346, 349, - 348, 48, 226, 47, 81, 355, 356, 164, 55, 86, - 357, 53, 77, 301, 56, 8, 359, 22, 54, 37, - 55, 175, 46, 165, 57, 128, 135, 127, 45, 43, - 54, 364, 300, 59, 133, 174, 9, 9, 42, 134, - 75, 41, 40, 51, 196, 358, 18, 19, 278, 87, - 20, 194, 229, 80, 350, 156, 76, 58, 232, 52, - 118, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 70, 71, 72, 73, 74, 0, 0, 0, 13, 0, - 0, 0, 24, 0, 30, 0, 0, 31, 32, 55, - 38, 0, 53, 77, 0, 56, 280, 0, 22, 54, - 0, 0, 0, 279, 0, 57, 0, 283, 284, 282, - 289, 291, 288, 290, 285, 286, 287, 292, 0, 0, - 0, 75, 0, 0, 0, 0, 0, 18, 19, 0, - 0, 20, 0, 0, 0, 0, 0, 76, 0, 0, - 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 70, 71, 72, 73, 74, 0, 0, 0, 13, - 0, 0, 0, 24, 0, 30, 0, 55, 31, 32, - 53, 77, 0, 56, 335, 0, 22, 54, 0, 0, - 0, 0, 0, 57, 0, 283, 284, 282, 289, 291, - 288, 290, 285, 286, 287, 292, 0, 
0, 0, 75, - 0, 0, 0, 0, 0, 18, 19, 0, 0, 20, - 0, 0, 0, 17, 77, 76, 0, 0, 0, 22, + 45, 43, 132, 173, 179, 184, 166, 85, 330, 178, + 331, 42, 133, 55, 41, 183, 185, 83, 339, 340, + 341, 335, 177, 54, 342, 81, 346, 345, 348, 347, + 86, 303, 40, 314, 354, 355, 302, 55, 51, 356, + 53, 77, 300, 56, 195, 358, 22, 54, 313, 55, + 174, 301, 227, 57, 8, 312, 226, 357, 37, 54, + 363, 299, 126, 277, 87, 193, 228, 125, 80, 75, + 349, 225, 155, 58, 231, 18, 19, 52, 118, 20, + 124, 0, 0, 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 13, 0, 0, - 0, 24, 0, 30, 0, 0, 31, 32, 18, 19, - 0, 0, 20, 0, 0, 0, 17, 35, 0, 0, - 0, 0, 22, 11, 12, 14, 15, 16, 21, 23, - 25, 26, 27, 28, 29, 33, 34, 0, 0, 0, - 13, 0, 0, 0, 24, 0, 30, 0, 0, 31, - 32, 18, 19, 0, 0, 20, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 12, 14, 15, - 16, 21, 23, 25, 26, 27, 28, 29, 33, 34, - 107, 0, 0, 13, 0, 0, 0, 24, 177, 30, - 0, 0, 31, 32, 0, 0, 0, 0, 0, 107, - 0, 0, 0, 0, 0, 0, 0, 89, 91, 92, - 0, 93, 94, 95, 96, 97, 98, 99, 100, 101, - 102, 0, 103, 104, 106, 90, 89, 91, 92, 0, + 0, 24, 0, 30, 0, 0, 31, 32, 55, 38, + 107, 53, 77, 0, 56, 279, 0, 22, 54, 0, + 0, 0, 278, 0, 57, 0, 282, 283, 281, 288, + 290, 287, 289, 284, 285, 286, 291, 0, 91, 0, + 75, 0, 0, 0, 0, 0, 18, 19, 100, 101, + 20, 0, 103, 0, 106, 90, 76, 0, 0, 0, + 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 0, 0, 0, 13, 105, + 0, 0, 24, 0, 30, 0, 55, 31, 32, 53, + 77, 0, 56, 334, 0, 22, 54, 0, 0, 0, + 0, 0, 57, 0, 282, 283, 281, 288, 290, 287, + 289, 284, 285, 286, 291, 0, 0, 0, 75, 0, + 0, 0, 0, 0, 18, 19, 0, 0, 20, 0, + 0, 0, 17, 77, 76, 0, 0, 0, 22, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 0, 0, 0, 13, 0, 0, 0, + 24, 0, 30, 0, 0, 31, 32, 18, 19, 0, + 0, 20, 0, 0, 0, 17, 35, 0, 0, 0, + 0, 22, 11, 12, 14, 15, 16, 21, 23, 25, + 26, 27, 28, 29, 33, 34, 0, 0, 0, 13, + 0, 0, 0, 24, 0, 30, 0, 0, 31, 32, + 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 
28, 29, 33, 34, 107, + 0, 0, 13, 0, 0, 0, 24, 176, 30, 0, + 0, 31, 32, 0, 0, 0, 0, 0, 107, 0, + 0, 0, 0, 0, 0, 0, 89, 91, 92, 0, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, - 0, 103, 104, 106, 90, 107, 0, 0, 0, 105, + 0, 103, 104, 106, 90, 89, 91, 92, 0, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 0, + 103, 104, 106, 90, 107, 0, 0, 0, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 107, 0, 0, 0, 105, 0, - 0, 0, 89, 91, 92, 0, 93, 94, 95, 0, - 97, 98, 99, 100, 101, 102, 0, 103, 104, 106, - 90, 89, 91, 92, 0, 93, 94, 0, 0, 97, - 98, 0, 100, 101, 102, 0, 103, 104, 106, 90, - 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, + 0, 0, 0, 107, 0, 0, 0, 105, 0, 0, + 0, 89, 91, 92, 0, 93, 94, 95, 0, 97, + 98, 99, 100, 101, 102, 0, 103, 104, 106, 90, + 89, 91, 92, 0, 93, 94, 0, 0, 97, 98, + 0, 100, 101, 102, 0, 103, 104, 106, 90, 0, + 0, 0, 0, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 105, + 0, 0, 105, } var yyPact = [...]int16{ - 31, 169, 574, 574, 410, 531, -1000, -1000, -1000, 193, + 31, 131, 573, 573, 409, 530, -1000, -1000, -1000, 103, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 314, -1000, 278, -1000, 655, + -1000, -1000, -1000, -1000, -1000, 305, -1000, 228, -1000, 654, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 57, 147, -1000, -1000, 488, -1000, 488, 192, + -1000, -1000, 21, 98, -1000, -1000, 487, -1000, 487, 99, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 187, -1000, -1000, - 263, -1000, -1000, 353, 277, -1000, -1000, 29, -1000, -53, - -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, - -53, -53, -53, -53, -53, 26, 214, 305, 147, -56, - -1000, 126, 126, 329, -1000, 636, 24, -1000, 262, -1000, - -1000, 181, 166, -1000, -1000, 178, -1000, 171, -1000, 163, - -1000, 296, 488, -1000, -58, -50, -1000, 488, 488, 488, 
- 488, 488, 488, 488, 488, 488, 488, 488, 488, 488, - 488, 488, -1000, 175, -1000, -1000, 111, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 115, 115, 311, -1000, -1000, -1000, - -1000, 179, -1000, -1000, 64, -1000, 655, -1000, -1000, 162, - -1000, 141, -1000, -1000, -1000, -1000, -1000, 32, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 25, 80, 17, -1000, -1000, - -1000, 409, 8, 126, 126, 126, 126, 24, 24, 119, - 119, 119, 720, 701, 119, 119, 720, 24, 24, 119, - 24, 8, -1000, 40, -1000, -1000, -1000, 341, -1000, 206, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 252, -1000, -1000, + 360, -1000, -1000, 266, 214, -1000, -1000, 20, -1000, -49, + -49, -49, -49, -49, -49, -49, -49, -49, -49, -49, + -49, -49, -49, -49, -49, 50, 48, 304, 98, -55, + -1000, 167, 167, 328, -1000, 635, 52, -1000, 302, -1000, + -1000, 261, 70, -1000, -1000, 207, -1000, 102, -1000, 96, + 154, 487, -1000, -56, -41, -1000, 487, 487, 487, 487, + 487, 487, 487, 487, 487, 487, 487, 487, 487, 487, + 487, -1000, 100, -1000, -1000, 47, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 39, 39, 350, -1000, -1000, -1000, -1000, + 178, -1000, -1000, 157, -1000, 654, -1000, -1000, 196, -1000, + 45, -1000, -1000, -1000, -1000, -1000, 43, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 16, 171, 163, -1000, -1000, -1000, + 408, 406, 167, 167, 167, 167, 52, 52, 119, 119, + 119, 719, 700, 119, 119, 719, 52, 52, 119, 52, + 406, -1000, 24, -1000, -1000, -1000, 340, -1000, 329, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 488, -1000, -1000, -1000, -1000, - -1000, -1000, 56, 56, 18, 56, 72, 72, 215, 70, - -1000, -1000, 272, 270, 267, 260, 250, 234, 228, 225, - 223, 216, 205, -1000, -1000, -1000, -1000, -1000, -1000, 165, - -1000, -1000, -1000, 30, -1000, 655, -1000, -1000, -1000, 56, - -1000, 
14, 13, 487, -1000, -1000, -1000, 22, 27, 27, - 27, 115, 186, 186, 22, 186, 22, -74, -1000, -1000, - -1000, -1000, -1000, 56, 56, -1000, -1000, -1000, 56, -1000, - -1000, -1000, -1000, -1000, -1000, 27, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 52, -1000, - 28, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 487, -1000, -1000, -1000, -1000, -1000, + -1000, 34, 34, 15, 34, 40, 40, 331, 32, -1000, + -1000, 204, 201, 199, 195, 193, 182, 181, 179, 175, + 159, 136, -1000, -1000, -1000, -1000, -1000, -1000, 97, -1000, + -1000, -1000, 13, -1000, 654, -1000, -1000, -1000, 34, -1000, + 11, 8, 486, -1000, -1000, -1000, 54, 174, 174, 174, + 39, 41, 41, 54, 41, 54, -73, -1000, -1000, -1000, + -1000, -1000, 34, 34, -1000, -1000, -1000, 34, -1000, -1000, + -1000, -1000, -1000, -1000, 174, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000, 165, + -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 390, 13, 389, 5, 15, 388, 363, 387, 385, - 12, 384, 209, 345, 383, 14, 382, 10, 11, 381, - 379, 7, 378, 8, 4, 375, 2, 1, 3, 374, - 27, 0, 373, 372, 17, 195, 371, 369, 6, 368, - 365, 16, 364, 56, 359, 9, 358, 356, 352, 333, - 331, 323, 304, 318, 306, + 0, 378, 13, 377, 5, 16, 374, 275, 373, 372, + 12, 370, 224, 354, 368, 14, 366, 10, 11, 365, + 364, 7, 363, 8, 4, 357, 2, 1, 3, 344, + 27, 0, 338, 332, 18, 194, 314, 312, 6, 311, + 303, 17, 302, 43, 301, 9, 300, 282, 281, 280, + 269, 255, 233, 238, 235, } var yyR1 = [...]int8{ @@ -584,7 +584,7 @@ var yyR2 = [...]int8{ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, - 3, 1, 2, 3, 3, 3, 3, 2, 2, 1, + 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, 1, 3, 5, 5, 1, 1, 1, 4, 3, 3, 2, 3, 1, 2, 3, 3, 3, 3, 3, 3, @@ -612,30 +612,30 @@ var yyChk = [...]int16{ 52, 53, 54, 56, 57, 83, 58, 14, -34, -41, 2, 79, 85, 15, -41, -38, -38, -43, -1, 20, -2, 12, -10, 2, 20, 7, 2, 4, 2, 4, - 2, 24, -35, 
-42, -37, -47, 78, -35, -35, -35, + 24, -35, -42, -37, -47, 78, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, -35, - -35, -35, -45, 57, 2, -31, -9, 2, -28, -30, - 88, 89, 19, 9, 41, 57, -45, 2, -41, -34, - -17, 15, 2, -17, -40, 22, -38, 22, 20, 7, - 2, -5, 2, 4, 54, 44, 55, -5, 20, -15, - 25, 2, 25, 2, -19, 5, -29, -21, 12, -28, - -30, 16, -38, 82, 84, 80, 81, -38, -38, -38, + -35, -45, 57, 2, -31, -9, 2, -28, -30, 88, + 89, 19, 9, 41, 57, -45, 2, -41, -34, -17, + 15, 2, -17, -40, 22, -38, 22, 20, 7, 2, + -5, 2, 4, 54, 44, 55, -5, 20, -15, 25, + 2, 25, 2, -19, 5, -29, -21, 12, -28, -30, + 16, -38, 82, 84, 80, 81, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, - -38, -38, -45, 15, -28, -28, 21, 6, 2, -16, - 22, -4, -6, 25, 2, 62, 78, 63, 79, 64, - 65, 66, 80, 81, 12, 82, 47, 48, 51, 67, - 18, 68, 83, 84, 69, 70, 71, 72, 73, 88, - 89, 59, 74, 75, 22, 7, 20, -2, 25, 2, - 25, 2, 26, 26, -30, 26, 41, 57, -22, 24, - 17, -23, 30, 28, 29, 35, 36, 37, 33, 31, - 34, 32, 38, -17, -17, -18, -17, -18, 22, -45, - 21, 2, 22, 7, 2, -38, -27, 19, -27, 26, - -27, -21, -21, 24, 17, 2, 17, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 21, 2, - 22, -4, -27, 26, 26, 17, -23, -26, 57, -27, - -31, -31, -31, -28, -24, 14, -24, -26, -24, -26, - -11, 92, 93, 94, 95, -27, -27, -27, -25, -31, - 24, 21, 2, 21, -31, + -38, -45, 15, -28, -28, 21, 6, 2, -16, 22, + -4, -6, 25, 2, 62, 78, 63, 79, 64, 65, + 66, 80, 81, 12, 82, 47, 48, 51, 67, 18, + 68, 83, 84, 69, 70, 71, 72, 73, 88, 89, + 59, 74, 75, 22, 7, 20, -2, 25, 2, 25, + 2, 26, 26, -30, 26, 41, 57, -22, 24, 17, + -23, 30, 28, 29, 35, 36, 37, 33, 31, 34, + 32, 38, -17, -17, -18, -17, -18, 22, -45, 21, + 2, 22, 7, 2, -38, -27, 19, -27, 26, -27, + -21, -21, 24, 17, 2, 17, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 21, 2, 22, + -4, -27, 26, 26, 17, -23, -26, 57, -27, -31, + -31, -31, -28, -24, 14, -24, -26, -24, -26, -11, + 92, 93, 94, 95, -27, -27, -27, -25, -31, 24, + 21, 2, 21, -31, } var yyDef = [...]int16{ @@ -647,35 
+647,35 @@ var yyDef = [...]int16{ 18, 19, 0, 108, 233, 234, 0, 244, 0, 85, 86, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 227, 228, 0, 5, 100, - 0, 128, 131, 0, 0, 139, 245, 140, 144, 43, + 0, 128, 131, 0, 135, 139, 245, 140, 144, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, 0, 0, 61, 0, 83, 84, 0, 89, - 91, 0, 95, 99, 126, 0, 132, 0, 137, 0, - 138, 143, 0, 42, 47, 48, 44, 0, 0, 0, + 91, 0, 95, 99, 126, 0, 132, 0, 138, 0, + 143, 0, 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 68, 0, 70, 71, 0, 73, 239, 240, - 74, 75, 235, 236, 0, 0, 0, 82, 20, 21, - 24, 0, 54, 25, 0, 63, 65, 67, 87, 0, - 92, 0, 98, 229, 230, 231, 232, 0, 127, 130, - 133, 135, 134, 136, 142, 145, 147, 150, 154, 155, - 156, 0, 26, 0, 0, -2, -2, 27, 28, 29, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 69, 0, 237, 238, 76, 0, 81, 0, - 53, 56, 58, 59, 60, 198, 199, 200, 201, 202, - 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, - 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, - 223, 224, 225, 226, 62, 66, 88, 90, 93, 97, - 94, 96, 0, 0, 0, 0, 0, 0, 0, 0, - 160, 162, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 45, 46, 49, 247, 50, 72, 0, - 78, 80, 51, 0, 57, 64, 146, 241, 148, 0, - 151, 0, 0, 0, 158, 163, 159, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 77, 79, - 52, 55, 149, 0, 0, 157, 161, 164, 0, 243, - 165, 166, 167, 168, 169, 0, 170, 171, 172, 173, - 174, 180, 181, 182, 183, 152, 153, 242, 0, 178, - 0, 176, 179, 175, 177, + 0, 68, 0, 70, 71, 0, 73, 239, 240, 74, + 75, 235, 236, 0, 0, 0, 82, 20, 21, 24, + 0, 54, 25, 0, 63, 65, 67, 87, 0, 92, + 0, 98, 229, 230, 231, 232, 0, 127, 130, 133, + 136, 134, 137, 142, 145, 147, 150, 154, 155, 156, + 0, 26, 0, 0, -2, -2, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 69, 0, 237, 238, 76, 0, 81, 0, 53, + 56, 58, 59, 60, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 62, 
66, 88, 90, 93, 97, 94, + 96, 0, 0, 0, 0, 0, 0, 0, 0, 160, + 162, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 45, 46, 49, 247, 50, 72, 0, 78, + 80, 51, 0, 57, 64, 146, 241, 148, 0, 151, + 0, 0, 0, 158, 163, 159, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 77, 79, 52, + 55, 149, 0, 0, 157, 161, 164, 0, 243, 165, + 166, 167, 168, 169, 0, 170, 171, 172, 173, 174, + 180, 181, 182, 183, 152, 153, 242, 0, 178, 0, + 176, 179, 175, 177, } var yyTok1 = [...]int8{ @@ -1623,10 +1623,9 @@ yydefault: yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } case 135: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-1 : yypt+1] { - yylex.(*parser).unexpected("label set", "string") - yyVAL.label = labels.Label{} + yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val} } case 136: yyDollar = yyS[yypt-3 : yypt+1] @@ -1635,9 +1634,9 @@ yydefault: yyVAL.label = labels.Label{} } case 137: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("label set", "\"=\"") + yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } case 138: diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 3113d18d57..9bf27264a8 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -244,7 +244,8 @@ type seriesDescription struct { values []SequenceValue } -// ParseSeriesDesc parses the description of a time series. +// ParseSeriesDesc parses the description of a time series. It is only used in +// the PromQL testing framework code. 
func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) { p := NewParser(input) p.lex.seriesDesc = true diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 0e5e2f638b..3445fce067 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2398,7 +2398,7 @@ var testExpr = []struct { }, }, { - input: `sum by ("foo")({"some.metric"})`, + input: `sum by ("foo bar")({"some.metric"})`, expected: &AggregateExpr{ Op: SUM, Expr: &VectorSelector{ @@ -2406,14 +2406,14 @@ var testExpr = []struct { MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"), }, PosRange: posrange.PositionRange{ - Start: 15, - End: 30, + Start: 19, + End: 34, }, }, - Grouping: []string{"foo"}, + Grouping: []string{"foo bar"}, PosRange: posrange.PositionRange{ Start: 0, - End: 31, + End: 35, }, }, }, @@ -4023,6 +4023,76 @@ func TestParseExpressions(t *testing.T) { } } +func TestParseSeriesDesc(t *testing.T) { + tests := []struct { + name string + input string + expectedLabels labels.Labels + expectedValues []SequenceValue + expectError string + }{ + { + name: "empty string", + expectedLabels: labels.EmptyLabels(), + expectedValues: []SequenceValue{}, + }, + { + name: "simple line", + input: `http_requests{job="api-server", instance="0", group="production"}`, + expectedLabels: labels.FromStrings( + "__name__", "http_requests", + "group", "production", + "instance", "0", + "job", "api-server", + ), + expectedValues: []SequenceValue{}, + }, + { + name: "label name characters that require quoting", + input: `{"http.requests", "service.name"="api-server", instance="0", group="canary"} 0+50x2`, + expectedLabels: labels.FromStrings( + "__name__", "http.requests", + "group", "canary", + "instance", "0", + "service.name", "api-server", + ), + expectedValues: []SequenceValue{ + {Value: 0, Omitted: false, Histogram: (*histogram.FloatHistogram)(nil)}, + {Value: 50, Omitted: false, Histogram: 
(*histogram.FloatHistogram)(nil)}, + {Value: 100, Omitted: false, Histogram: (*histogram.FloatHistogram)(nil)}, + }, + }, + { + name: "confirm failure on junk after identifier", + input: `{"http.requests"xx} 0+50x2`, + expectError: `parse error: unexpected identifier "xx" in label set, expected "," or "}"`, + }, + { + name: "confirm failure on bare operator after identifier", + input: `{"http.requests"=, x="y"} 0+50x2`, + expectError: `parse error: unexpected "," in label set, expected string`, + }, + { + name: "confirm failure on unterminated string identifier", + input: `{"http.requests} 0+50x2`, + expectError: `parse error: unterminated quoted string`, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + l, v, err := ParseSeriesDesc(tc.input) + if tc.expectError != "" { + require.Contains(t, err.Error(), tc.expectError) + } else { + require.NoError(t, err) + require.True(t, labels.Equal(tc.expectedLabels, l)) + require.Equal(t, tc.expectedValues, v) + } + }) + } +} + // NaN has no equality. Thus, we need a separate test for it. func TestNaNExpression(t *testing.T) { expr, err := ParseExpr("NaN") diff --git a/promql/parser/printer.go b/promql/parser/printer.go index 63b1950827..afe755e7dd 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -14,8 +14,10 @@ package parser import ( + "bytes" "fmt" "sort" + "strconv" "strings" "time" @@ -91,13 +93,20 @@ func (node *AggregateExpr) getAggOpStr() string { } func joinLabels(ss []string) string { + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + for i, s := range ss { - // If the label is already quoted, don't quote it again. 
- if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(string(model.LabelValue(s))) { - ss[i] = fmt.Sprintf("\"%s\"", s) + if i > 0 { + b.WriteString(", ") + } + if !model.IsValidLegacyMetricName(string(model.LabelValue(s))) { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), s)) + } else { + b.WriteString(s) } } - return strings.Join(ss, ", ") + return b.String() } func (node *BinaryExpr) returnBool() string { diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index f208b4f313..60df005036 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -56,6 +56,10 @@ const ( DefaultMaxSamplesPerQuery = 10000 ) +func init() { + model.NameValidationScheme = model.UTF8Validation +} + type TBRun interface { testing.TB Run(string, func(*testing.T)) bool diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index d0d09271e9..327dcd78fe 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -165,6 +165,8 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + {"http.requests", "service.name"="api-server", instance="0", group="canary"} 0+50x10 + {"http.requests", "service.name"="api-server", instance="1", group="canary"} 0+60x10 ` testCases := map[string]struct { @@ -176,6 +178,12 @@ load 5m eval instant at 5m sum by (group) (http_requests) {group="production"} 30 {group="canary"} 70 +`, + }, + "instant query on UTF-8 metric with expected float result": { + input: testData + ` +eval instant at 5m sum by ("service.name") ({"http.requests"}) + {"service.name"="api-server"} 110 `, }, "instant query with unexpected float result": { @@ -184,7 +192,7 @@ eval instant at 5m sum by (group) (http_requests) {group="production"} 30 {group="canary"} 80 `, - expectedError: `error in eval sum by (group) 
(http_requests) (line 8): expected 80 for {group="canary"} but got 70`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): expected 80 for {group="canary"} but got 70`, }, "instant query with expected histogram result": { input: ` @@ -230,7 +238,7 @@ eval instant at 0 testmetric eval instant at 5m sum by (group) (http_requests) {group="production"} 30 `, - expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result, has value 70`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): unexpected metric {group="canary"} in result, has value 70`, }, "instant query, but result has an unexpected series with a histogram value": { input: ` @@ -248,7 +256,7 @@ eval instant at 5m sum by (group) (http_requests) {group="canary"} 70 {group="test"} 100 `, - expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} with 3: [100.000000] not found`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): expected metric {group="test"} with 3: [100.000000] not found`, }, "instant query expected to fail, and query fails": { input: ` @@ -334,7 +342,7 @@ eval_ordered instant at 50m sort(http_requests) http_requests{group="canary", instance="1", job="api-server"} 400 http_requests{group="canary", instance="0", job="api-server"} 300 `, - expectedError: `error in eval sort(http_requests) (line 8): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`, + expectedError: `error in eval sort(http_requests) (line 10): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`, }, "instant query with results expected to match provided order, but result has an unexpected series": { input: testData + ` @@ -343,7 +351,7 @@ eval_ordered instant at 50m sort(http_requests) 
http_requests{group="production", instance="1", job="api-server"} 200 http_requests{group="canary", instance="0", job="api-server"} 300 `, - expectedError: `error in eval sort(http_requests) (line 8): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`, + expectedError: `error in eval sort(http_requests) (line 10): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`, }, "instant query with invalid timestamp": { input: `eval instant at abc123 vector(0)`, @@ -362,7 +370,7 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests) {group="production"} 0 30 60 {group="canary"} 0 80 140 `, - expectedError: `error in eval sum by (group) (http_requests) (line 8): expected float value at index 1 (t=300000) for {group="canary"} to be 80, but got 70 (result has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points [])`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): expected float value at index 1 (t=300000) for {group="canary"} to be 80, but got 70 (result has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points [])`, }, "range query with expected histogram values": { input: ` @@ -389,7 +397,7 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests) {group="production"} 0 30 60 90 {group="canary"} 0 70 140 `, - expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 4 points for {group="production"}, but query time range cannot return this many points`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): expected 4 points for {group="production"}, but query time range cannot return this many points`, }, "range query with missing point in result": { input: ` @@ -407,14 +415,14 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests) {group="production"} 0 30 {group="canary"} 0 70 140 `, - expectedError: 
`error in eval sum by (group) (http_requests) (line 8): expected 2 float points and 0 histogram points for {group="production"}, but got 3 float points [0 @[0] 30 @[300000] 60 @[600000]] and 0 histogram points []`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): expected 2 float points and 0 histogram points for {group="production"}, but got 3 float points [0 @[0] 30 @[300000] 60 @[600000]] and 0 histogram points []`, }, "range query, but result has an unexpected series": { input: testData + ` eval range from 0 to 10m step 5m sum by (group) (http_requests) {group="production"} 0 30 60 `, - expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result, has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points []`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): unexpected metric {group="canary"} in result, has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points []`, }, "range query, but result is missing a series": { input: testData + ` @@ -423,7 +431,7 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests) {group="canary"} 0 70 140 {group="test"} 0 100 200 `, - expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} not found`, + expectedError: `error in eval sum by (group) (http_requests) (line 10): expected metric {group="test"} not found`, }, "range query expected to fail, and query fails": { input: ` diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 35cd4c4cd5..6fb1ae1e49 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -163,6 +163,11 @@ func TestWriteV2RequestFixture(t *testing.T) { } func TestValidateLabelsAndMetricName(t *testing.T) { + oldScheme := model.NameValidationScheme + model.NameValidationScheme = model.LegacyValidation + defer func() { + model.NameValidationScheme = oldScheme + }() tests 
:= []struct { input []prompb.Label expectedErr string From fc0141aec205ace7b40001c949f9d08e20a01060 Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Wed, 4 Dec 2024 23:34:29 +0000 Subject: [PATCH 1027/1397] discovery: add openstack load balancer discovery Signed-off-by: Paulo Dias --- discovery/openstack/loadbalancer.go | 201 ++++++++ discovery/openstack/loadbalancer_test.go | 133 +++++ discovery/openstack/mock_test.go | 603 ++++++++++++++++++++++- discovery/openstack/openstack.go | 9 +- docs/configuration/configuration.md | 18 + 5 files changed, 959 insertions(+), 5 deletions(-) create mode 100644 discovery/openstack/loadbalancer.go create mode 100644 discovery/openstack/loadbalancer_test.go diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go new file mode 100644 index 0000000000..2025fd97a1 --- /dev/null +++ b/discovery/openstack/loadbalancer.go @@ -0,0 +1,201 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openstack + +import ( + "context" + "fmt" + "log" + "log/slog" + "net" + "strconv" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + openstackLabelLoadBalancerID = openstackLabelPrefix + "loadbalancer_id" + openstackLabelLoadBalancerName = openstackLabelPrefix + "loadbalancer_name" + openstackLabelLoadBalancerStatus = openstackLabelPrefix + "loadbalancer_status" + openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone" + openstackLabelLoadBalancerFloatingIP = openstackLabelPrefix + "loadbalancer_floating_ip" + openstackLabelLoadBalancerVIP = openstackLabelPrefix + "loadbalancer_vip" + openstackLabelLoadBalancerProvider = openstackLabelPrefix + "loadbalancer_provider" + openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags" +) + +// InstanceDiscovery discovers OpenStack instances. +type LoadBalancerDiscovery struct { + provider *gophercloud.ProviderClient + authOpts *gophercloud.AuthOptions + region string + logger *slog.Logger + availability gophercloud.Availability +} + +// NewInstanceDiscovery returns a new instance discovery. 
+func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, + region string, availability gophercloud.Availability, l *slog.Logger, +) *LoadBalancerDiscovery { + if l == nil { + l = promslog.NewNopLogger() + } + return &LoadBalancerDiscovery{ + provider: provider, authOpts: opts, + region: region, availability: availability, logger: l, + } +} + +func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + i.provider.Context = ctx + err := openstack.Authenticate(i.provider, *i.authOpts) + if err != nil { + return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) + } + + client, err := openstack.NewLoadBalancerV2(i.provider, gophercloud.EndpointOpts{ + Region: i.region, Availability: i.availability, + }) + if err != nil { + return nil, fmt.Errorf("could not create OpenStack load balancer session: %w", err) + } + + networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{ + Region: i.region, Availability: i.availability, + }) + if err != nil { + return nil, fmt.Errorf("could not create OpenStack network session: %w", err) + } + + allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages() + if err != nil { + return nil, fmt.Errorf("failed to list load balancers: %w", err) + } + + allLBs, err := loadbalancers.ExtractLoadBalancers(allPages) + if err != nil { + return nil, fmt.Errorf("failed to extract load balancers: %w", err) + } + + // Fetch all listeners in one API call + listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages() + if err != nil { + return nil, fmt.Errorf("failed to list all listeners: %w", err) + } + + allListeners, err := listeners.ExtractListeners(listenerPages) + if err != nil { + return nil, fmt.Errorf("failed to extract all listeners: %w", err) + } + + // Create a map to group listeners by Load Balancer ID + listenerMap := make(map[string][]listeners.Listener) + for _, listener := range 
allListeners { + // Iterate through each associated Load Balancer ID in the Loadbalancers array + for _, lb := range listener.Loadbalancers { + listenerMap[lb.ID] = append(listenerMap[lb.ID], listener) + } + } + + // Fetch all floating IPs with pagination + fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages() + if err != nil { + log.Printf("Error calling OpenStack API: %v", err) + return nil, fmt.Errorf("failed to list all fips: %w", err) + } + if err != nil { + return nil, fmt.Errorf("failed to list floating IPs: %w", err) + } + + allFIPs, err := floatingips.ExtractFloatingIPs(fipPages) + if err != nil { + return nil, fmt.Errorf("failed to extract floating IPs: %w", err) + } + + // Create a map to associate floating IPs with their resource IDs + fipMap := make(map[string]string) // Key: LoadBalancerID/PortID, Value: Floating IP + for _, fip := range allFIPs { + if fip.PortID != "" { + fipMap[fip.PortID] = fip.FloatingIP + } + } + + tg := &targetgroup.Group{ + Source: "OS_" + i.region, + } + + for _, lb := range allLBs { + // Retrieve listeners for this load balancer from the map + lbListeners, exists := listenerMap[lb.ID] + if !exists || len(lbListeners) == 0 { + i.logger.Debug("Got no listener", "loadbalancer", lb.ID) + continue + } + + // Variable to store the port of the first PROMETHEUS listener + var listenerPort int + hasPrometheusListener := false + + // Check if any listener has the PROMETHEUS protocol + for _, listener := range lbListeners { + if listener.Protocol == "PROMETHEUS" { + hasPrometheusListener = true + listenerPort = listener.ProtocolPort + break + } + } + + // Skip LBs without PROMETHEUS listener protocol + if !hasPrometheusListener { + i.logger.Debug("Got no PROMETHEUS listener", "loadbalancer", lb.ID) + continue + } + + labels := model.LabelSet{} + addr := net.JoinHostPort(lb.VipAddress, strconv.Itoa(listenerPort)) + labels[model.AddressLabel] = model.LabelValue(addr) + labels[openstackLabelLoadBalancerID] = 
model.LabelValue(lb.ID) + labels[openstackLabelLoadBalancerName] = model.LabelValue(lb.Name) + labels[openstackLabelLoadBalancerStatus] = model.LabelValue(lb.ProvisioningStatus) + labels[openstackLabelLoadBalancerAvailabilityZone] = model.LabelValue(lb.AvailabilityZone) + labels[openstackLabelLoadBalancerVIP] = model.LabelValue(lb.VipAddress) + labels[openstackLabelLoadBalancerProvider] = model.LabelValue(lb.Provider) + labels[openstackLabelProjectID] = model.LabelValue(lb.ProjectID) + + if len(lb.Tags) > 0 { + labels[openstackLabelLoadBalancerTags] = model.LabelValue(strings.Join(lb.Tags, ",")) + } + + if floatingIP, exists := fipMap[lb.VipPortID]; exists { + labels[openstackLabelLoadBalancerFloatingIP] = model.LabelValue(floatingIP) + } + + tg.Targets = append(tg.Targets, labels) + } + + if err != nil { + return nil, err + } + + return []*targetgroup.Group{tg}, nil +} diff --git a/discovery/openstack/loadbalancer_test.go b/discovery/openstack/loadbalancer_test.go new file mode 100644 index 0000000000..45c9d1024d --- /dev/null +++ b/discovery/openstack/loadbalancer_test.go @@ -0,0 +1,133 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openstack + +import ( + "context" + "fmt" + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" +) + +type OpenstackSDLoadBalancerTestSuite struct { + Mock *SDMock +} + +func (s *OpenstackSDLoadBalancerTestSuite) SetupTest(t *testing.T) { + s.Mock = NewSDMock(t) + s.Mock.Setup() + + s.Mock.HandleLoadBalancerListSuccessfully() + s.Mock.HandleListenersListSuccessfully() + s.Mock.HandleFloatingIPsListSuccessfully() + + s.Mock.HandleVersionsSuccessfully() + s.Mock.HandleAuthSuccessfully() +} + +func (s *OpenstackSDLoadBalancerTestSuite) openstackAuthSuccess() (refresher, error) { + conf := SDConfig{ + IdentityEndpoint: s.Mock.Endpoint(), + Password: "test", + Username: "test", + DomainName: "12345", + Region: "RegionOne", + Role: "loadbalancer", + } + return newRefresher(&conf, nil) +} + +func TestOpenstackSDLoadBalancerRefresh(t *testing.T) { + mock := &OpenstackSDLoadBalancerTestSuite{} + mock.SetupTest(t) + + instance, err := mock.openstackAuthSuccess() + require.NoError(t, err) + + ctx := context.Background() + tgs, err := instance.refresh(ctx) + + require.NoError(t, err) + require.Len(t, tgs, 1) + + tg := tgs[0] + require.NotNil(t, tg) + require.NotNil(t, tg.Targets) + require.Len(t, tg.Targets, 4) + + for i, lbls := range []model.LabelSet{ + { + "__address__": model.LabelValue("10.0.0.32:9273"), + "__meta_openstack_loadbalancer_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb1"), + "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"), + 
"__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), + }, + { + "__address__": model.LabelValue("10.0.2.78:8080"), + "__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb3"), + "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.2.78"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"), + "__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"), + }, + { + "__address__": model.LabelValue("10.0.3.99:9090"), + "__meta_openstack_loadbalancer_id": model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb4"), + "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.3.99"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"), + }, + { + "__address__": model.LabelValue("10.0.4.88:9876"), + "__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb5"), + "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"), + 
"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"), + }, + } { + t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { + require.Equal(t, lbls, tg.Targets[i]) + }) + } +} + +func TestOpenstackSDLoadBalancerRefreshWithDoneContext(t *testing.T) { + mock := &OpenstackSDLoadBalancerTestSuite{} + mock.SetupTest(t) + + loadbalancer, _ := mock.openstackAuthSuccess() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err := loadbalancer.refresh(ctx) + require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled) +} diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index 4518f41166..0ebcc93fc3 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -124,7 +124,7 @@ func (m *SDMock) HandleAuthSuccessfully() { "type": "identity", "name": "keystone" }, - { + { "endpoints": [ { "id": "e2ffee808abc4a60916715b1d4b489dd", @@ -136,7 +136,33 @@ func (m *SDMock) HandleAuthSuccessfully() { ], "id": "b7f2a5b1a019459cb956e43a8cb41e31", "type": "compute" - } + }, + { + "endpoints": [ + { + "id": "dc9a55e0bf84487a98671fbc74b68e68", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "url": "%s" + } + ], + "id": "c609fc430175452290b62a4242e8a7e8", + "type": "network" + }, + { + "endpoints": [ + { + "id": "39dc322ce86c1234b4f06c2eeae0841b", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "url": "%s" + } + ], + "id": "c609fc430175123490b62a4242e8a7e8", + "type": "load-balancer" + } ], "expires_at": "2013-02-27T18:30:59.999999Z", @@ -174,7 +200,7 @@ func (m *SDMock) HandleAuthSuccessfully() { } } } - `, m.Endpoint()) + `, m.Endpoint(), m.Endpoint(), m.Endpoint()) }) } @@ -590,3 +616,574 @@ func (m *SDMock) HandleFloatingIPListSuccessfully() { fmt.Fprint(w, listOutput) }) } + +const lbListBody = ` +{ + 
"loadbalancers": [ + { + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "name": "lb1", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "fcad67a6189847c4aecfa3c81a05783b", + "created_at": "2024-12-01T10:00:00", + "updated_at": "2024-12-01T10:30:00", + "vip_address": "10.0.0.32", + "vip_port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e", + "vip_subnet_id": "14a4c6a5-fe71-4a94-9071-4cd12fb8337f", + "vip_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "tags": ["tag1", "tag2"], + "availability_zone": "az1", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "c4146b54-febc-4caf-a53f-ed1cab6faba5" + }, + { + "id": "a058d20e-82de-4eff-bb65-5c76a8554435" + } + ], + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54", + "name": "lb3", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "created_at": "2024-12-01T12:00:00", + "updated_at": "2024-12-01T12:45:00", + "vip_address": "10.0.2.78", + "vip_port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9", + "vip_subnet_id": "36c5e9f6-e7a2-4975-a8c6-3b8e4f93cf45", + "vip_network_id": "g03c6f27-e617-4975-c8f7-4c9f3f94cf68", + "tags": ["tag5", "tag6"], + "availability_zone": "az3", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "5b9529a4-6cbf-48f8-a006-d99cbc717da0" + }, + { + "id": "5d26333b-74d1-4b2a-90ab-2b2c0f5a8048" + } + ], + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67", + "name": "lb4", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "created_at": "2024-12-01T13:00:00", + "updated_at": "2024-12-01T13:20:00", + "vip_address": "10.0.3.99", 
+ "vip_port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6", + "vip_subnet_id": "47d6f8f9-f7b2-4876-a9d8-4e8f4g95df79", + "vip_network_id": "h04d7f38-f718-4876-d9g8-5d8g5h95df89", + "tags": [], + "availability_zone": "az1", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "84c87596-1ff0-4f6d-b151-0a78e1f407a3" + }, + { + "id": "fe460a7c-16a9-4984-9fe6-f6e5153ebab1" + } + ], + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + }, + { + "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e", + "name": "lb5", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "created_at": "2024-12-01T11:00:00", + "updated_at": "2024-12-01T11:15:00", + "vip_address": "10.0.4.88", + "vip_port_id": "d83a6d92-7a3e-4567-94b3-20c83b32a75e", + "vip_subnet_id": "25b4d8e5-fe81-4a87-9071-4cc12fb8337f", + "vip_network_id": "f02c5e19-c507-4864-b16e-2b7a39e56be3", + "tags": [], + "availability_zone": "az4", + "vip_vnic_type": "normal", + "provider": "amphora", + "listeners": [ + { + "id": "50902e62-34b8-46b2-9ed4-9053e7ad46dc" + }, + { + "id": "98a867ad-ff07-4880-b05f-32088866a68a" + } + ], + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + } + ] +} +` + +// HandleLoadBalancerListSuccessfully mocks the load balancer list API. 
+func (m *SDMock) HandleLoadBalancerListSuccessfully() { + m.Mux.HandleFunc("/v2.0/lbaas/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, lbListBody) + }) +} + + +const listenerListBody = ` +{ + "listeners": [ + { + "id": "c4146b54-febc-4caf-a53f-ed1cab6faba5", + "name": "stats-listener", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 9273, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "fcad67a6189847c4aecfa3c81a05783b", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "5b9529a4-6cbf-48f8-a006-d99cbc717da0", + "name": "stats-listener2", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 8080, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + 
"updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "84c87596-1ff0-4f6d-b151-0a78e1f407a3", + "name": "stats-listener3", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 9090, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + }, + { + "id": "50902e62-34b8-46b2-9ed4-9053e7ad46dc", + "name": "stats-listener4", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "PROMETHEUS", + "protocol_port": 9876, + "connection_limit": -1, + 
"default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "default_pool_id": null, + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-08-29T18:05:24", + "updated_at": "2024-12-04T21:21:10", + "loadbalancers": [ + { + "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + }, + { + "id": "a058d20e-82de-4eff-bb65-5c76a8554435", + "name": "port6443", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6443, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + }, + { + "id": 
"5d26333b-74d1-4b2a-90ab-2b2c0f5a8048", + "name": "port6444", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6444, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "fe460a7c-16a9-4984-9fe6-f6e5153ebab1", + "name": "port6445", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6445, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": 
null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + }, + { + "id": "98a867ad-ff07-4880-b05f-32088866a68a", + "name": "port6446", + "description": "", + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE", + "admin_state_up": true, + "protocol": "TCP", + "protocol_port": 6446, + "connection_limit": -1, + "default_tls_container_ref": null, + "sni_container_refs": [], + "project_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29", + "default_pool_id": "5643208b-b691-4b1f-a6b8-356f14903e56", + "l7policies": [], + "insert_headers": {}, + "created_at": "2024-10-02T19:32:48", + "updated_at": "2024-12-04T21:44:34", + "loadbalancers": [ + { + "id": "e83a6d92-7a3e-4567-94b3-20c83b32a75e" + } + ], + "timeout_client_data": 50000, + "timeout_member_connect": 5000, + "timeout_member_data": 50000, + "timeout_tcp_inspect": 0, + "tags": [], + "client_ca_tls_container_ref": null, + "client_authentication": "NONE", + "client_crl_container_ref": null, + "allowed_cidrs": null, + "tls_ciphers": null, + "tls_versions": null, + "alpn_protocols": null, + "hsts_max_age": null, + "hsts_include_subdomains": null, + "hsts_preload": null, + "tenant_id": "a5d3b2e1e6f34cd9a5f7c2f01a6b8e29" + } + ] +} +` + +// HandleListenersListSuccessfully mocks the listeners endpoint. 
+func (m *SDMock) HandleListenersListSuccessfully() { + m.Mux.HandleFunc("/v2.0/lbaas/listeners", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, listenerListBody) + }) +} + +const floatingIPListBody = ` +{ + "floatingips": [ + { + "id": "fea7332d-9027-4cf9-bf62-c3c4c6ebaf84", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "192.168.1.2", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e", + "fixed_ip_address": "10.0.0.32", + "status": "ACTIVE", + "description": "", + "port_details": { + "name": "dummy", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "mac_address": "fa:16:3e:b3:a3:c6", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "", + "device_owner": "compute:az1" + }, + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T15:11:37Z", + "updated_at": "2023-08-30T15:11:38Z", + "revision_number": 1, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "febb9554-cf83-4f9b-94d9-1b3c34be357f", + "tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85", + "floating_ip_address": "192.168.3.4", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9", + "fixed_ip_address": "10.0.2.78", + "status": "ACTIVE", + "description": "", + "port_details": { + "name": "dummy", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "mac_address": "fa:16:3e:b3:a3:c6", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "", + "device_owner": "compute:az3" + }, + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T15:11:37Z", + 
"updated_at": "2023-08-30T15:11:38Z", + "revision_number": 1, + "project_id": "ac57f03dba1a4fdebff3e67201bc7a85" + }, + { + "id": "febb9554-cf83-4f9b-94d9-1b3c34be357f", + "tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa", + "floating_ip_address": "192.168.4.5", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6", + "fixed_ip_address": "10.0.3.99", + "status": "ACTIVE", + "description": "", + "port_details": { + "name": "dummy", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "mac_address": "fa:16:3e:b3:a3:c6", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "", + "device_owner": "compute:az3" + }, + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T15:11:37Z", + "updated_at": "2023-08-30T15:11:38Z", + "revision_number": 1, + "project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa" + } + ] +} +` + +// HandleFloatingIPsListSuccessfully mocks the floating IPs endpoint. +func (m *SDMock) HandleFloatingIPsListSuccessfully() { + m.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, floatingIPListBody) + }) +} diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index cd0bcc1267..e1dba211cf 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -97,6 +97,9 @@ const ( // OpenStack document reference // https://docs.openstack.org/horizon/pike/user/launch-instances.html OpenStackRoleInstance Role = "instance" + // Openstack document reference + // https://docs.openstack.org/openstacksdk/rocky/user/resources/load_balancer/index.html + OpenStackRoleLoadBalancer Role = "loadbalancer" ) // UnmarshalYAML implements the yaml.Unmarshaler interface. 
@@ -105,7 +108,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } switch *c { - case OpenStackRoleHypervisor, OpenStackRoleInstance: + case OpenStackRoleHypervisor, OpenStackRoleInstance, OpenStackRoleLoadBalancer: return nil default: return fmt.Errorf("unknown OpenStack SD role %q", *c) @@ -128,7 +131,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } if c.Role == "" { - return errors.New("role missing (one of: instance, hypervisor)") + return errors.New("role missing (one of: instance, hypervisor, loadbalancer)") } if c.Region == "" { return errors.New("openstack SD configuration requires a region") @@ -211,6 +214,8 @@ func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) { return newHypervisorDiscovery(client, &opts, conf.Port, conf.Region, availability, l), nil case OpenStackRoleInstance: return newInstanceDiscovery(client, &opts, conf.Port, conf.Region, conf.AllTenants, availability, l), nil + case OpenStackRoleLoadBalancer: + return newLoadBalancerDiscovery(client, &opts, conf.Region, availability, l), nil } return nil, errors.New("unknown OpenStack discovery role") } diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3ec15f9b10..1d34d306c4 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1195,6 +1195,24 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_tag_`: each metadata item of the instance, with any unsupported characters converted to an underscore. * `__meta_openstack_user_id`: the user account owning the tenant. +#### `loadbalancer` + +The `loadbalancer` role discovers one target per `PROMETHEUS` listener of +Octavia load balancer. The target address defaults to the VIP address of the +load balancer.
+ +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_openstack_loadbalancer_availability_zone`: the availability zone of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_floating_ip`: the floating IP of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_id`: the OpenStack load balancer ID. +* `__meta_openstack_loadbalancer_name`: the OpenStack load balancer name. +* `__meta_openstack_loadbalancer_provider`: the Octavia provider of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_status`: the status of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_tags`: comma separated list of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_vip`: the VIP of the OpenStack load balancer. +* `__meta_openstack_project_id`: the project (tenant) owning this instance. + See below for the configuration options for OpenStack discovery: ```yaml From 9e9929c421be507f2a89578bec0c41cf23f777e1 Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Wed, 4 Dec 2024 23:46:11 +0000 Subject: [PATCH 1028/1397] fix: remove new line Signed-off-by: Paulo Dias --- discovery/openstack/mock_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index 0ebcc93fc3..015a0c431c 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -747,7 +747,6 @@ func (m *SDMock) HandleLoadBalancerListSuccessfully() { }) } - const listenerListBody = ` { "listeners": [ From d136e4310992dc7bfc778d94e014e3acde66b31f Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Wed, 4 Dec 2024 23:48:31 +0000 Subject: [PATCH 1029/1397] fix: fix comment Signed-off-by: Paulo Dias --- discovery/openstack/loadbalancer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go index 2025fd97a1..5b72a7d538 100644 --- 
a/discovery/openstack/loadbalancer.go +++ b/discovery/openstack/loadbalancer.go @@ -44,7 +44,7 @@ const ( openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags" ) -// InstanceDiscovery discovers OpenStack instances. +// LoadBalancerDiscovery discovers OpenStack load balancers. type LoadBalancerDiscovery struct { provider *gophercloud.ProviderClient authOpts *gophercloud.AuthOptions @@ -53,7 +53,7 @@ type LoadBalancerDiscovery struct { availability gophercloud.Availability } -// NewInstanceDiscovery returns a new instance discovery. +// NewLoadBalancerDiscovery returns a new loadbalancer discovery. func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, region string, availability gophercloud.Availability, l *slog.Logger, ) *LoadBalancerDiscovery { From a90aa34e7168c9c9035f37863bd91f4183b17631 Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Wed, 4 Dec 2024 23:52:14 +0000 Subject: [PATCH 1030/1397] fix: fix docs typo Signed-off-by: Paulo Dias --- docs/configuration/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 1d34d306c4..da3af79600 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1211,7 +1211,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_loadbalancer_status`: the status of the OpenStack load balancer. * `__meta_openstack_loadbalancer_tags`: comma separated list of the OpenStack load balancer. * `__meta_openstack_loadbalancer_vip`: the VIP of the OpenStack load balancer. -* `__meta_openstack_project_id`: the project (tenant) owning this instance. +* `__meta_openstack_project_id`: the project (tenant) owning this load balancer. 
See below for the configuration options for OpenStack discovery: From aa144b7263564b51117e702a0419ac16322dd9cc Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Wed, 16 Oct 2024 09:18:23 -0700 Subject: [PATCH 1031/1397] Handle custom buckets in WAL and WBL --- tsdb/encoding/encoding.go | 8 +++ tsdb/head.go | 2 + tsdb/head_append.go | 36 ++++++++++++- tsdb/head_wal.go | 107 ++++++++++++++++++++++++++++++++++++- tsdb/record/record.go | 67 ++++++++++++++++++++++- tsdb/record/record_test.go | 29 ++++++++++ tsdb/wlog/checkpoint.go | 13 +++++ 7 files changed, 259 insertions(+), 3 deletions(-) diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index cc7d0990f6..c339a9a5bb 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -104,6 +104,14 @@ func (e *Encbuf) PutHashSum(h hash.Hash) { e.B = h.Sum(e.B) } +// IsWholeWhenMultiplied checks to see if the number when multiplied by 1000 can +// be converted into an integer without losing precision. +func IsWholeWhenMultiplied(in float64) bool { + i := uint(math.Round(in * 1000)) + out := float64(i) / 1000 + return in == out +} + // Decbuf provides safe methods to extract data from a byte slice. It does all // necessary bounds checking and advancing of the byte slice. // Several datums can be extracted without checking for errors. 
However, before using diff --git a/tsdb/head.go b/tsdb/head.go index c67c438e52..b7a358a6a6 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -87,6 +87,7 @@ type Head struct { logger *slog.Logger appendPool zeropool.Pool[[]record.RefSample] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] + customValuesPool zeropool.Pool[[]record.RefCustomValues] histogramsPool zeropool.Pool[[]record.RefHistogramSample] floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] metadataPool zeropool.Pool[[]record.RefMetadata] @@ -2134,6 +2135,7 @@ type memSeries struct { // We keep the last histogram value here (in addition to appending it to the chunk) so we can check for duplicates. lastHistogramValue *histogram.Histogram lastFloatHistogramValue *histogram.FloatHistogram + customValues []float64 // Current appender for the head chunk. Set when a new head chunk is cut. // It is nil only if headChunks is nil. E.g. if there was an appender that created a new series, but rolled back the commit diff --git a/tsdb/head_append.go b/tsdb/head_append.go index ea2a163f26..fbed0ee7eb 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -181,6 +181,7 @@ func (h *Head) appender() *headAppender { samples: h.getAppendBuffer(), sampleSeries: h.getSeriesBuffer(), exemplars: exemplarsBuf, + customValues: h.getCustomValuesBuffer(), histograms: h.getHistogramBuffer(), floatHistograms: h.getFloatHistogramBuffer(), metadata: h.getMetadataBuffer(), @@ -244,6 +245,18 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { h.exemplarsPool.Put(b[:0]) } +func (h *Head) getCustomValuesBuffer() []record.RefCustomValues { + b := h.customValuesPool.Get() + if b == nil { + return make([]record.RefCustomValues, 0, 512) + } + return b +} + +func (h *Head) putCustomValuesBuffer(b []record.RefCustomValues) { + h.customValuesPool.Put(b[:0]) +} + func (h *Head) getHistogramBuffer() []record.RefHistogramSample { b := h.histogramsPool.Get() if b == nil { @@ -326,6 +339,7 @@ type headAppender 
struct { histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). floatHistograms []record.RefFloatHistogramSample // New float histogram samples held by this appender. floatHistogramSeries []*memSeries // FloatHistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). + customValues []record.RefCustomValues // Custom values for histograms that use custom buckets held by this appender. metadata []record.RefMetadata // New metadata held by this appender. metadataSeries []*memSeries // Series corresponding to the metadata held by this appender. exemplars []exemplarWithSeriesRef // New exemplars held by this appender. @@ -690,7 +704,12 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // This whole "if" should be removed. if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { s.lastHistogramValue = &histogram.Histogram{} - } + if histogram.IsCustomBucketsSchema(h.Schema) { + a.customValues = append(a.customValues, record.RefCustomValues{ + Ref: s.ref, + CustomValues: h.CustomValues, + }) + } // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. @@ -727,6 +746,12 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // This whole "if" should be removed. 
if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { s.lastFloatHistogramValue = &histogram.FloatHistogram{} + if histogram.IsCustomBucketsSchema(fh.Schema) { + a.customValues = append(a.customValues, record.RefCustomValues{ + Ref: s.ref, + CustomValues: fh.CustomValues, + }) + } } // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise @@ -942,6 +967,13 @@ func (a *headAppender) log() error { return fmt.Errorf("log samples: %w", err) } } + if len(a.customValues) > 0 { + rec = enc.CustomValues(a.customValues, buf) + buf = rec[:0] + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom values: %w", err) + } + } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] @@ -1428,6 +1460,7 @@ func (a *headAppender) Commit() (err error) { defer a.head.putAppendBuffer(a.samples) defer a.head.putSeriesBuffer(a.sampleSeries) defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.putCustomValuesBuffer(a.customValues) defer a.head.putHistogramBuffer(a.histograms) defer a.head.putFloatHistogramBuffer(a.floatHistograms) defer a.head.putMetadataBuffer(a.metadata) @@ -1949,6 +1982,7 @@ func (a *headAppender) Rollback() (err error) { } a.head.putAppendBuffer(a.samples) a.head.putExemplarBuffer(a.exemplars) + a.head.putCustomValuesBuffer(a.customValues) a.head.putHistogramBuffer(a.histograms) a.head.putFloatHistogramBuffer(a.floatHistograms) a.head.putMetadataBuffer(a.metadata) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 6744d582ae..637929428d 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -58,6 +58,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch var unknownExemplarRefs atomic.Uint64 var unknownHistogramRefs atomic.Uint64 var unknownMetadataRefs atomic.Uint64 + var unknownCustomValuesRefs atomic.Uint64 // Track number of series records that had overlapping m-map chunks. 
var mmapOverlappingChunks atomic.Uint64 @@ -81,6 +82,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch histogramsPool zeropool.Pool[[]record.RefHistogramSample] floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] metadataPool zeropool.Pool[[]record.RefMetadata] + customValuesPool zeropool.Pool[[]record.RefCustomValues] ) defer func() { @@ -223,6 +225,18 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- meta + case record.CustomValues: + customVals := customValuesPool.Get()[:0] + customVals, err := dec.CustomValues(rec, customVals) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: fmt.Errorf("decode custom values: %w", err), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- customVals default: // Noop. } @@ -331,6 +345,19 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } + if histogram.IsCustomBucketsSchema(sam.H.Schema) { + ms := h.series.getByID(sam.Ref) + if ms == nil { + unknownHistogramRefs.Inc() + continue + } + + if ms.lastFloatHistogramValue != nil { + sam.H.CustomValues = ms.lastFloatHistogramValue.CustomValues + } else { + sam.H.CustomValues = ms.customValues + } + } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } @@ -367,6 +394,14 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } + if histogram.IsCustomBucketsSchema(sam.FH.Schema) { + ms := h.series.getByID(sam.Ref) + if ms == nil { + unknownHistogramRefs.Inc() + continue + } + sam.FH.CustomValues = ms.customValues + } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } @@ -393,6 +428,29 @@ Outer: } } metadataPool.Put(v) + case []record.RefCustomValues: + for _, cv := range v { + s := h.series.getByID(cv.Ref) + if s == nil { + 
unknownCustomValuesRefs.Inc() + continue + } + //TODO: do we actually want to check lastFloatHistogramValue? + if s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.customValues = cv.CustomValues + } + } + customValuesPool.Put(v) + // iterate over custom value records and do same things as for series/samples - put them in correct processor + // processor depends on series ref + // something like this: + // idx := uint64(mSeries.ref) % uint64(concurrency) + // processors[idx].input <- walSubsetProcessorInputItem{customValues: } + //for _, cv := range v { + // idx := uint64(cv.Ref) % uint64(concurrency) + // processors[idx].input <- walSubsetProcessorInputItem{customValues: cv} + //} + //customValuesPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } @@ -616,10 +674,25 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.t <= ms.mmMaxTime { continue } + var chunkCreated bool if s.h != nil { + //if histogram.IsCustomBucketsSchema(s.h.Schema) { + // if ms.lastHistogramValue != nil { + // + // } + //} _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts) } else { + //if histogram.IsCustomBucketsSchema(s.fh.Schema) { + // if ms.lastFloatHistogramValue != nil { + // s.h.CustomValues = ms.lastFloatHistogramValue.CustomValues + // } else { + // s.h.CustomValues = ms.customValues + // } + // //customVals := h. + // //s.h.CustomValues = + //} _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts) } if chunkCreated { @@ -647,7 +720,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { // Track number of samples, histogram samples, m-map markers, that referenced a series we don't know about // for error reporting. 
- var unknownRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64 + var unknownRefs, unknownHistogramRefs, unknownCustomValuesRefs, mmapMarkerUnknownRefs atomic.Uint64 lastSeq, lastOff := lastMmapRef.Unpack() // Start workers that each process samples for a partition of the series ID space. @@ -747,6 +820,18 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- hists + case record.CustomValues: + customVals := customValuesPool.Get().([]record.RefCustomValues)[:0] + customVals, err := dec.CustomValues(rec, customVals) + if err != nil { + decodeErr = &wlog.CorruptionErr{ + Err: fmt.Errorf("decode custom values: %w", err), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decodedCh <- customVals default: // Noop. } @@ -831,6 +916,18 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } + if histogram.IsCustomBucketsSchema(sam.H.Schema) { + ms := h.series.getByID(sam.Ref) + if ms == nil { + unknownHistogramRefs.Inc() + continue + } + if ms.lastFloatHistogramValue != nil { + sam.H.CustomValues = ms.lastFloatHistogramValue.CustomValues + } else { + sam.H.CustomValues = ms.customValues + } + } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } @@ -863,6 +960,14 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } + if histogram.IsCustomBucketsSchema(sam.FH.Schema) { + ms := h.series.getByID(sam.Ref) + if ms == nil { + unknownHistogramRefs.Inc() + continue + } + sam.FH.CustomValues = ms.customValues + } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 784d0b23d7..6f5fb24384 
100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -52,6 +52,7 @@ const ( HistogramSamples Type = 7 // FloatHistogramSamples is used to match WAL records of type Float Histograms. FloatHistogramSamples Type = 8 + CustomValues Type = 9 ) func (rt Type) String() string { @@ -72,6 +73,8 @@ func (rt Type) String() string { return "mmapmarkers" case Metadata: return "metadata" + case CustomValues: + return "custom_values" default: return "unknown" } @@ -147,6 +150,11 @@ type RefSeries struct { Labels labels.Labels } +type RefCustomValues struct { + Ref chunks.HeadSeriesRef + CustomValues []float64 +} + // RefSample is a timestamp/value pair associated with a reference to a series. // TODO(beorn7): Perhaps make this "polymorphic", including histogram and float-histogram pointers? Then get rid of RefHistogramSample. type RefSample struct { @@ -207,7 +215,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomValues: return t } return Unknown @@ -589,6 +597,39 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { } } +// TODO: optimize +func (d *Decoder) CustomValues(rec []byte, customValues []RefCustomValues) ([]RefCustomValues, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != CustomValues { + return nil, errors.New("invalid record type") + } + if dec.Len() == 0 { + return customValues, nil + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := storage.SeriesRef(dec.Be64()) + l := dec.Uvarint() + if l > 0 { + vals := make([]float64, l) + for i := range vals { + vals[i] = dec.Be64Float64() + } + customValues = append(customValues, RefCustomValues{ + Ref: chunks.HeadSeriesRef(ref), + CustomValues: vals, + }) + } + } + if dec.Err() != nil { + return 
nil, dec.Err() + } + if len(dec.B) > 0 { + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return customValues, nil +} + // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. type Encoder struct{} @@ -831,3 +872,27 @@ func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) { buf.PutBEFloat64(b) } } + +func (e *Encoder) CustomValues(customValues []RefCustomValues, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomValues)) + + if len(customValues) == 0 { + return buf.Get() + } + + for _, v := range customValues { + buf.PutBE64(uint64(v.Ref)) + EncodeCustomValues(&buf, v.CustomValues) + } + + return buf.Get() +} + +// TODO: optimize +func EncodeCustomValues(buf *encoding.Encbuf, values []float64) { + buf.PutUvarint(len(values)) + for _, v := range values { + buf.PutBEFloat64(v) + } +} diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index f3a657aecb..c5f9f09f26 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -181,6 +181,22 @@ func TestRecord_EncodeDecode(t *testing.T) { decFloatHistograms, err = dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil) require.NoError(t, err) require.Equal(t, floatHistograms, decFloatHistograms) + + // Custom values for histograms + customValues := []RefCustomValues{ + { + Ref: 56, + CustomValues: []float64{0, 1, 2, 3, 4}, + }, + { + Ref: 42, + CustomValues: []float64{5, 10, 15, 20, 25}, + }, + } + + decCustomValues, err := dec.CustomValues(enc.CustomValues(customValues, nil), nil) + require.NoError(t, err) + require.Equal(t, customValues, decCustomValues) } // TestRecord_Corrupted ensures that corrupted records return the correct error. 
@@ -269,6 +285,15 @@ func TestRecord_Corrupted(t *testing.T) { _, err := dec.HistogramSamples(corrupted, nil) require.ErrorIs(t, err, encoding.ErrInvalidSize) }) + + t.Run("Test corrupted customValues record", func(t *testing.T) { + customValues := []RefCustomValues{ + {Ref: 56, CustomValues: []float64{0, 1, 2, 3, 4}}, + } + corrupted := enc.CustomValues(customValues, nil)[:8] + _, err := dec.CustomValues(corrupted, nil) + require.ErrorIs(t, err, encoding.ErrInvalidSize) + }) } func TestRecord_Type(t *testing.T) { @@ -312,6 +337,10 @@ func TestRecord_Type(t *testing.T) { recordType = dec.Type(enc.HistogramSamples(histograms, nil)) require.Equal(t, HistogramSamples, recordType) + customValues := []RefCustomValues{{Ref: 56, CustomValues: []float64{0, 1, 2, 3, 4}}} + recordType = dec.Type(enc.CustomValues(customValues, nil)) + require.Equal(t, CustomValues, recordType) + recordType = dec.Type(nil) require.Equal(t, Unknown, recordType) diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 58e11c770e..a96d142495 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -151,6 +151,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He samples []record.RefSample histogramSamples []record.RefHistogramSample floatHistogramSamples []record.RefFloatHistogramSample + customValues []record.RefCustomValues tstones []tombstones.Stone exemplars []record.RefExemplar metadata []record.RefMetadata @@ -299,6 +300,18 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } stats.TotalMetadata += len(metadata) stats.DroppedMetadata += len(metadata) - repl + case record.CustomValues: + customValues, err = dec.CustomValues(rec, customValues) + if err != nil { + return nil, fmt.Errorf("decode custom values: %w", err) + } + repl := customValues[:0] + for _, v := range customValues { + repl = append(repl, v) + } + if len(repl) > 0 { + buf = enc.CustomValues(repl, buf) + } default: // Unknown record type, 
probably from a future Prometheus version. continue From 6d413fad361914372ccea6c99c74fb49fe1048d9 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 28 Oct 2024 08:43:00 -0700 Subject: [PATCH 1032/1397] Use histogram records for custom value handling --- tsdb/head_append.go | 37 +---------------- tsdb/head_wal.go | 72 +-------------------------------- tsdb/record/record.go | 81 +++++++++++++++----------------------- tsdb/record/record_test.go | 29 -------------- tsdb/wlog/checkpoint.go | 13 ------ 5 files changed, 34 insertions(+), 198 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index fbed0ee7eb..7601f7847b 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -181,7 +181,6 @@ func (h *Head) appender() *headAppender { samples: h.getAppendBuffer(), sampleSeries: h.getSeriesBuffer(), exemplars: exemplarsBuf, - customValues: h.getCustomValuesBuffer(), histograms: h.getHistogramBuffer(), floatHistograms: h.getFloatHistogramBuffer(), metadata: h.getMetadataBuffer(), @@ -245,18 +244,6 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { h.exemplarsPool.Put(b[:0]) } -func (h *Head) getCustomValuesBuffer() []record.RefCustomValues { - b := h.customValuesPool.Get() - if b == nil { - return make([]record.RefCustomValues, 0, 512) - } - return b -} - -func (h *Head) putCustomValuesBuffer(b []record.RefCustomValues) { - h.customValuesPool.Put(b[:0]) -} - func (h *Head) getHistogramBuffer() []record.RefHistogramSample { b := h.histogramsPool.Get() if b == nil { @@ -339,7 +326,6 @@ type headAppender struct { histogramSeries []*memSeries // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). floatHistograms []record.RefFloatHistogramSample // New float histogram samples held by this appender. 
floatHistogramSeries []*memSeries // FloatHistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). - customValues []record.RefCustomValues // Custom values for histograms that use custom buckets held by this appender. metadata []record.RefMetadata // New metadata held by this appender. metadataSeries []*memSeries // Series corresponding to the metadata held by this appender. exemplars []exemplarWithSeriesRef // New exemplars held by this appender. @@ -704,13 +690,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // This whole "if" should be removed. if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { s.lastHistogramValue = &histogram.Histogram{} - if histogram.IsCustomBucketsSchema(h.Schema) { - a.customValues = append(a.customValues, record.RefCustomValues{ - Ref: s.ref, - CustomValues: h.CustomValues, - }) - } - + } // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. _, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) @@ -746,12 +726,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // This whole "if" should be removed. 
if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { s.lastFloatHistogramValue = &histogram.FloatHistogram{} - if histogram.IsCustomBucketsSchema(fh.Schema) { - a.customValues = append(a.customValues, record.RefCustomValues{ - Ref: s.ref, - CustomValues: fh.CustomValues, - }) - } } // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise @@ -967,13 +941,6 @@ func (a *headAppender) log() error { return fmt.Errorf("log samples: %w", err) } } - if len(a.customValues) > 0 { - rec = enc.CustomValues(a.customValues, buf) - buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log custom values: %w", err) - } - } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] @@ -1460,7 +1427,6 @@ func (a *headAppender) Commit() (err error) { defer a.head.putAppendBuffer(a.samples) defer a.head.putSeriesBuffer(a.sampleSeries) defer a.head.putExemplarBuffer(a.exemplars) - defer a.head.putCustomValuesBuffer(a.customValues) defer a.head.putHistogramBuffer(a.histograms) defer a.head.putFloatHistogramBuffer(a.floatHistograms) defer a.head.putMetadataBuffer(a.metadata) @@ -1982,7 +1948,6 @@ func (a *headAppender) Rollback() (err error) { } a.head.putAppendBuffer(a.samples) a.head.putExemplarBuffer(a.exemplars) - a.head.putCustomValuesBuffer(a.customValues) a.head.putHistogramBuffer(a.histograms) a.head.putFloatHistogramBuffer(a.floatHistograms) a.head.putMetadataBuffer(a.metadata) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 637929428d..885d14a08b 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -58,7 +58,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch var unknownExemplarRefs atomic.Uint64 var unknownHistogramRefs atomic.Uint64 var unknownMetadataRefs atomic.Uint64 - var unknownCustomValuesRefs atomic.Uint64 + // Track number of series records that had overlapping m-map chunks. 
var mmapOverlappingChunks atomic.Uint64 @@ -82,7 +82,6 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch histogramsPool zeropool.Pool[[]record.RefHistogramSample] floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] metadataPool zeropool.Pool[[]record.RefMetadata] - customValuesPool zeropool.Pool[[]record.RefCustomValues] ) defer func() { @@ -225,18 +224,6 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- meta - case record.CustomValues: - customVals := customValuesPool.Get()[:0] - customVals, err := dec.CustomValues(rec, customVals) - if err != nil { - decodeErr = &wlog.CorruptionErr{ - Err: fmt.Errorf("decode custom values: %w", err), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decoded <- customVals default: // Noop. } @@ -428,29 +415,6 @@ Outer: } } metadataPool.Put(v) - case []record.RefCustomValues: - for _, cv := range v { - s := h.series.getByID(cv.Ref) - if s == nil { - unknownCustomValuesRefs.Inc() - continue - } - //TODO: do we actually want to check lastFloatHistogramValue? 
- if s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { - s.customValues = cv.CustomValues - } - } - customValuesPool.Put(v) - // iterate over custom value records and do same things as for series/samples - put them in correct processor - // processor depends on series ref - // something like this: - // idx := uint64(mSeries.ref) % uint64(concurrency) - // processors[idx].input <- walSubsetProcessorInputItem{customValues: } - //for _, cv := range v { - // idx := uint64(cv.Ref) % uint64(concurrency) - // processors[idx].input <- walSubsetProcessorInputItem{customValues: cv} - //} - //customValuesPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } @@ -720,7 +684,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { // Track number of samples, histogram samples, m-map markers, that referenced a series we don't know about // for error reporting. - var unknownRefs, unknownHistogramRefs, unknownCustomValuesRefs, mmapMarkerUnknownRefs atomic.Uint64 + var unknownRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64 lastSeq, lastOff := lastMmapRef.Unpack() // Start workers that each process samples for a partition of the series ID space. @@ -820,18 +784,6 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- hists - case record.CustomValues: - customVals := customValuesPool.Get().([]record.RefCustomValues)[:0] - customVals, err := dec.CustomValues(rec, customVals) - if err != nil { - decodeErr = &wlog.CorruptionErr{ - Err: fmt.Errorf("decode custom values: %w", err), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decodedCh <- customVals default: // Noop. 
} @@ -916,18 +868,6 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - if histogram.IsCustomBucketsSchema(sam.H.Schema) { - ms := h.series.getByID(sam.Ref) - if ms == nil { - unknownHistogramRefs.Inc() - continue - } - if ms.lastFloatHistogramValue != nil { - sam.H.CustomValues = ms.lastFloatHistogramValue.CustomValues - } else { - sam.H.CustomValues = ms.customValues - } - } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } @@ -960,14 +900,6 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - if histogram.IsCustomBucketsSchema(sam.FH.Schema) { - ms := h.series.getByID(sam.Ref) - if ms == nil { - unknownHistogramRefs.Inc() - continue - } - sam.FH.CustomValues = ms.customValues - } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 6f5fb24384..d759c18551 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -513,6 +513,18 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { for i := range h.NegativeBuckets { h.NegativeBuckets[i] = buf.Varint64() } + + if histogram.IsCustomBucketsSchema(h.Schema) { + l = buf.Uvarint() + if l > 0 { + if l > 0 { + h.CustomValues = make([]float64, l) + } + for i := range h.CustomValues { + h.CustomValues[i] = buf.Be64Float64() + } + } + } } func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { @@ -595,39 +607,18 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { for i := range fh.NegativeBuckets { fh.NegativeBuckets[i] = buf.Be64Float64() } -} -// TODO: optimize -func (d 
*Decoder) CustomValues(rec []byte, customValues []RefCustomValues) ([]RefCustomValues, error) { - dec := encoding.Decbuf{B: rec} - - if Type(dec.Byte()) != CustomValues { - return nil, errors.New("invalid record type") - } - if dec.Len() == 0 { - return customValues, nil - } - for len(dec.B) > 0 && dec.Err() == nil { - ref := storage.SeriesRef(dec.Be64()) - l := dec.Uvarint() + if histogram.IsCustomBucketsSchema(fh.Schema) { + l = buf.Uvarint() if l > 0 { - vals := make([]float64, l) - for i := range vals { - vals[i] = dec.Be64Float64() + if l > 0 { + fh.CustomValues = make([]float64, l) + } + for i := range fh.CustomValues { + fh.CustomValues[i] = buf.Be64Float64() } - customValues = append(customValues, RefCustomValues{ - Ref: chunks.HeadSeriesRef(ref), - CustomValues: vals, - }) } } - if dec.Err() != nil { - return nil, dec.Err() - } - if len(dec.B) > 0 { - return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) - } - return customValues, nil } // Encoder encodes series, sample, and tombstones records. 
@@ -813,6 +804,13 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { for _, b := range h.NegativeBuckets { buf.PutVarint64(b) } + + if histogram.IsCustomBucketsSchema(h.Schema) { + buf.PutUvarint(len(h.CustomValues)) + for _, v := range h.CustomValues { + buf.PutBEFloat64(v) + } + } } func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { @@ -871,28 +869,11 @@ func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) { for _, b := range h.NegativeBuckets { buf.PutBEFloat64(b) } -} -func (e *Encoder) CustomValues(customValues []RefCustomValues, b []byte) []byte { - buf := encoding.Encbuf{B: b} - buf.PutByte(byte(CustomValues)) - - if len(customValues) == 0 { - return buf.Get() - } - - for _, v := range customValues { - buf.PutBE64(uint64(v.Ref)) - EncodeCustomValues(&buf, v.CustomValues) - } - - return buf.Get() -} - -// TODO: optimize -func EncodeCustomValues(buf *encoding.Encbuf, values []float64) { - buf.PutUvarint(len(values)) - for _, v := range values { - buf.PutBEFloat64(v) + if histogram.IsCustomBucketsSchema(h.Schema) { + buf.PutUvarint(len(h.CustomValues)) + for _, v := range h.CustomValues { + buf.PutBEFloat64(v) + } } } diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index c5f9f09f26..f3a657aecb 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -181,22 +181,6 @@ func TestRecord_EncodeDecode(t *testing.T) { decFloatHistograms, err = dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil) require.NoError(t, err) require.Equal(t, floatHistograms, decFloatHistograms) - - // Custom values for histograms - customValues := []RefCustomValues{ - { - Ref: 56, - CustomValues: []float64{0, 1, 2, 3, 4}, - }, - { - Ref: 42, - CustomValues: []float64{5, 10, 15, 20, 25}, - }, - } - - decCustomValues, err := dec.CustomValues(enc.CustomValues(customValues, nil), nil) - require.NoError(t, err) - require.Equal(t, 
customValues, decCustomValues) } // TestRecord_Corrupted ensures that corrupted records return the correct error. @@ -285,15 +269,6 @@ func TestRecord_Corrupted(t *testing.T) { _, err := dec.HistogramSamples(corrupted, nil) require.ErrorIs(t, err, encoding.ErrInvalidSize) }) - - t.Run("Test corrupted customValues record", func(t *testing.T) { - customValues := []RefCustomValues{ - {Ref: 56, CustomValues: []float64{0, 1, 2, 3, 4}}, - } - corrupted := enc.CustomValues(customValues, nil)[:8] - _, err := dec.CustomValues(corrupted, nil) - require.ErrorIs(t, err, encoding.ErrInvalidSize) - }) } func TestRecord_Type(t *testing.T) { @@ -337,10 +312,6 @@ func TestRecord_Type(t *testing.T) { recordType = dec.Type(enc.HistogramSamples(histograms, nil)) require.Equal(t, HistogramSamples, recordType) - customValues := []RefCustomValues{{Ref: 56, CustomValues: []float64{0, 1, 2, 3, 4}}} - recordType = dec.Type(enc.CustomValues(customValues, nil)) - require.Equal(t, CustomValues, recordType) - recordType = dec.Type(nil) require.Equal(t, Unknown, recordType) diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index a96d142495..58e11c770e 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -151,7 +151,6 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He samples []record.RefSample histogramSamples []record.RefHistogramSample floatHistogramSamples []record.RefFloatHistogramSample - customValues []record.RefCustomValues tstones []tombstones.Stone exemplars []record.RefExemplar metadata []record.RefMetadata @@ -300,18 +299,6 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } stats.TotalMetadata += len(metadata) stats.DroppedMetadata += len(metadata) - repl - case record.CustomValues: - customValues, err = dec.CustomValues(rec, customValues) - if err != nil { - return nil, fmt.Errorf("decode custom values: %w", err) - } - repl := customValues[:0] - for _, v := range customValues { - repl = 
append(repl, v) - } - if len(repl) > 0 { - buf = enc.CustomValues(repl, buf) - } default: // Unknown record type, probably from a future Prometheus version. continue From cfcd51538dede02375a02aaf478e7fc7ac299890 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 28 Oct 2024 09:49:08 -0700 Subject: [PATCH 1033/1397] Remove references to custom values record --- tsdb/head.go | 2 -- tsdb/head_append.go | 11 ----------- tsdb/head_wal.go | 35 ----------------------------------- tsdb/record/record.go | 10 +--------- 4 files changed, 1 insertion(+), 57 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index b7a358a6a6..c67c438e52 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -87,7 +87,6 @@ type Head struct { logger *slog.Logger appendPool zeropool.Pool[[]record.RefSample] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] - customValuesPool zeropool.Pool[[]record.RefCustomValues] histogramsPool zeropool.Pool[[]record.RefHistogramSample] floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] metadataPool zeropool.Pool[[]record.RefMetadata] @@ -2135,7 +2134,6 @@ type memSeries struct { // We keep the last histogram value here (in addition to appending it to the chunk) so we can check for duplicates. lastHistogramValue *histogram.Histogram lastFloatHistogramValue *histogram.FloatHistogram - customValues []float64 // Current appender for the head chunk. Set when a new head chunk is cut. // It is nil only if headChunks is nil. E.g. 
if there was an appender that created a new series, but rolled back the commit diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 7601f7847b..7dacb9037b 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -1458,17 +1458,6 @@ func (a *headAppender) Commit() (err error) { a.commitFloatHistograms(acc) a.commitMetadata() - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) - a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) - a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) - a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) - a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) - acc.collectOOORecords(a) if a.head.wbl != nil { if err := a.head.wbl.Log(acc.oooRecords...); err != nil { diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 885d14a08b..dd4f4f8b17 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -332,19 +332,6 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - if histogram.IsCustomBucketsSchema(sam.H.Schema) { - ms := h.series.getByID(sam.Ref) - if ms == nil { - unknownHistogramRefs.Inc() - continue - } - - if ms.lastFloatHistogramValue != nil { - sam.H.CustomValues = ms.lastFloatHistogramValue.CustomValues - } else { - sam.H.CustomValues = ms.customValues - } - } mod := 
uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } @@ -381,14 +368,6 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - if histogram.IsCustomBucketsSchema(sam.FH.Schema) { - ms := h.series.getByID(sam.Ref) - if ms == nil { - unknownHistogramRefs.Inc() - continue - } - sam.FH.CustomValues = ms.customValues - } mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } @@ -641,22 +620,8 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp var chunkCreated bool if s.h != nil { - //if histogram.IsCustomBucketsSchema(s.h.Schema) { - // if ms.lastHistogramValue != nil { - // - // } - //} _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts) } else { - //if histogram.IsCustomBucketsSchema(s.fh.Schema) { - // if ms.lastFloatHistogramValue != nil { - // s.h.CustomValues = ms.lastFloatHistogramValue.CustomValues - // } else { - // s.h.CustomValues = ms.customValues - // } - // //customVals := h. - // //s.h.CustomValues = - //} _, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts) } if chunkCreated { diff --git a/tsdb/record/record.go b/tsdb/record/record.go index d759c18551..83adecdbb4 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -52,7 +52,6 @@ const ( HistogramSamples Type = 7 // FloatHistogramSamples is used to match WAL records of type Float Histograms. 
FloatHistogramSamples Type = 8 - CustomValues Type = 9 ) func (rt Type) String() string { @@ -73,8 +72,6 @@ func (rt Type) String() string { return "mmapmarkers" case Metadata: return "metadata" - case CustomValues: - return "custom_values" default: return "unknown" } @@ -150,11 +147,6 @@ type RefSeries struct { Labels labels.Labels } -type RefCustomValues struct { - Ref chunks.HeadSeriesRef - CustomValues []float64 -} - // RefSample is a timestamp/value pair associated with a reference to a series. // TODO(beorn7): Perhaps make this "polymorphic", including histogram and float-histogram pointers? Then get rid of RefHistogramSample. type RefSample struct { @@ -215,7 +207,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomValues: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples: return t } return Unknown From 37df50adb9365b415e224128e02b2f5f606a71c2 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Wed, 13 Nov 2024 14:20:11 -0800 Subject: [PATCH 1034/1397] Attempt for record type --- tsdb/agent/db.go | 41 +++-- tsdb/agent/db_test.go | 7 +- tsdb/docs/format/wal.md | 26 +++ tsdb/encoding/encoding.go | 8 - tsdb/head_append.go | 46 ++++- tsdb/head_test.go | 313 +++++++++++++++++++++++++++++++++++ tsdb/head_wal.go | 7 +- tsdb/record/record.go | 112 +++++++++---- tsdb/record/record_test.go | 71 +++++++- tsdb/testutil.go | 34 +++- tsdb/tsdbutil/histogram.go | 23 +++ tsdb/wlog/checkpoint.go | 40 ++++- tsdb/wlog/checkpoint_test.go | 10 +- tsdb/wlog/watcher_test.go | 6 +- 14 files changed, 660 insertions(+), 84 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 3863e6cd99..5067edc3ae 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -463,7 +463,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H 
return } decoded <- samples - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketHistogramSamples: histograms := histogramsPool.Get()[:0] histograms, err = dec.HistogramSamples(rec, histograms) if err != nil { @@ -475,7 +475,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- histograms - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: floatHistograms := floatHistogramsPool.Get()[:0] floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { @@ -1154,19 +1154,40 @@ func (a *appender) log() error { } if len(a.pendingHistograms) > 0 { - buf = encoder.HistogramSamples(a.pendingHistograms, buf) - if err := a.wal.Log(buf); err != nil { - return err + buf1, buf2 := encoder.HistogramSamples(a.pendingHistograms, buf) + //buf = append(buf1, buf2...) + //if err := a.wal.Log(buf); err != nil { + // return err + //} + if len(buf1) > 0 { + buf = buf1[:0] + if err := a.wal.Log(buf1); err != nil { + return err + } + } + if len(buf2) > 0 { + buf = buf2[:0] + if err := a.wal.Log(buf2); err != nil { + return err + } } - buf = buf[:0] } if len(a.pendingFloatHistograms) > 0 { - buf = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) - if err := a.wal.Log(buf); err != nil { - return err + buf1, buf2 := encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) + if len(buf1) > 0 { + buf = buf1[:0] + if err := a.wal.Log(buf1); err != nil { + return err + } } - buf = buf[:0] + if len(buf2) > 0 { + buf = buf2[:0] + if err := a.wal.Log(buf2); err != nil { + return err + } + } + //buf = buf[:0] } if len(a.pendingExamplars) > 0 { diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index b28c29095c..5332c61cdb 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -193,7 +193,8 @@ func TestCommit(t *testing.T) { ) for r.Next() { rec := r.Record() - switch dec.Type(rec) { + recType := 
dec.Type(rec) + switch recType { case record.Series: var series []record.RefSeries series, err = dec.Series(rec, series) @@ -206,13 +207,13 @@ func TestCommit(t *testing.T) { require.NoError(t, err) walSamplesCount += len(samples) - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketHistogramSamples: var histograms []record.RefHistogramSample histograms, err = dec.HistogramSamples(rec, histograms) require.NoError(t, err) walHistogramCount += len(histograms) - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: var floatHistograms []record.RefFloatHistogramSample floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) require.NoError(t, err) diff --git a/tsdb/docs/format/wal.md b/tsdb/docs/format/wal.md index db1ce97a8b..835ede4113 100644 --- a/tsdb/docs/format/wal.md +++ b/tsdb/docs/format/wal.md @@ -79,6 +79,32 @@ The first sample record begins at the second row. └──────────────────────────────────────────────────────────────────┘ ``` +### Native histogram records + +Native histogram records are encoded as + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ type = 2 <1b> │ +├──────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬───────────────────────────┬ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴───────────────────────────┴─────────────┤ │ +│ │ n = len(labels) │ │ +│ ├──────────────────────┬───────────────────────────────────────┤ │ +│ │ len(str_1) │ str_1 │ │ +│ ├──────────────────────┴───────────────────────────────────────┤ │ +│ │ ... │ │ +│ ├───────────────────────┬──────────────────────────────────────┤ │ +│ │ len(str_2n) │ str_2n │ │ │ +│ └───────────────────────┴────────────────┴─────────────────────┘ │ +│ . . . 
│ +└──────────────────────────────────────────────────────────────────┘ +``` + ### Tombstone records Tombstone records encode tombstones as a list of triples `(series_id, min_time, max_time)` diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go index c339a9a5bb..cc7d0990f6 100644 --- a/tsdb/encoding/encoding.go +++ b/tsdb/encoding/encoding.go @@ -104,14 +104,6 @@ func (e *Encbuf) PutHashSum(h hash.Hash) { e.B = h.Sum(e.B) } -// IsWholeWhenMultiplied checks to see if the number when multiplied by 1000 can -// be converted into an integer without losing precision. -func IsWholeWhenMultiplied(in float64) bool { - i := uint(math.Round(in * 1000)) - out := float64(i) / 1000 - return in == out -} - // Decbuf provides safe methods to extract data from a byte slice. It does all // necessary bounds checking and advancing of the byte slice. // Several datums can be extracted without checking for errors. However, before using diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 7dacb9037b..3701a57135 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -942,17 +942,47 @@ func (a *headAppender) log() error { } } if len(a.histograms) > 0 { - rec = enc.HistogramSamples(a.histograms, buf) - buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log histograms: %w", err) + rec1, rec2 := enc.HistogramSamples(a.histograms, buf) + //rec = append(rec1, rec2...) 
+ // + //buf = rec[:0] + // + //if err := a.head.wal.Log(rec); err != nil { + // return fmt.Errorf("log samples: %w", err) + //} + if len(rec1) != 0 { + buf = rec1[:0] + if err := a.head.wal.Log(rec1); err != nil { + return fmt.Errorf("log histograms: %w", err) + } + } + if len(rec2) != 0 { + buf = rec2[:0] + if err := a.head.wal.Log(rec2); err != nil { + return fmt.Errorf("log custom bucket histograms: %w", err) + } } } if len(a.floatHistograms) > 0 { - rec = enc.FloatHistogramSamples(a.floatHistograms, buf) - buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log float histograms: %w", err) + rec1, rec2 := enc.FloatHistogramSamples(a.floatHistograms, buf) + //rec = append(rec1, rec2...) + // + //buf = rec[:0] + // + //if err := a.head.wal.Log(rec); err != nil { + // return fmt.Errorf("log samples: %w", err) + //} + if len(rec1) != 0 { + buf = rec1[:0] + if err := a.head.wal.Log(rec1); err != nil { + return fmt.Errorf("log float histograms: %w", err) + } + } + if len(rec2) != 0 { + buf = rec2[:0] + if err := a.head.wal.Log(rec2); err != nil { + return fmt.Errorf("log custom bucket float histograms: %w", err) + } } } // Exemplars should be logged after samples (float/native histogram/etc), diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2ca3aeffc7..527476e113 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -740,6 +740,89 @@ func TestHead_ReadWAL(t *testing.T) { } } +func TestHead_ReadWAL2(t *testing.T) { + for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { + t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { + entries := []interface{}{ + []record.RefSeries{ + {Ref: 10, Labels: labels.FromStrings("a", "1")}, + {Ref: 11, Labels: labels.FromStrings("a", "2")}, + {Ref: 100, Labels: labels.FromStrings("a", "3")}, + }, + []record.RefHistogramSample{ + {Ref: 0, T: 99, H: tsdbutil.GenerateTestHistogram(1)}, + {Ref: 10, T: 100, H: 
tsdbutil.GenerateTestCustomBucketsHistogram(2)}, + {Ref: 100, T: 100, H: tsdbutil.GenerateTestHistogram(3)}, + }, + []record.RefSeries{ + {Ref: 50, Labels: labels.FromStrings("a", "4")}, + // This series has two refs pointing to it. + {Ref: 101, Labels: labels.FromStrings("a", "3")}, + }, + []record.RefHistogramSample{ + {Ref: 10, T: 101, H: tsdbutil.GenerateTestHistogram(5)}, + {Ref: 50, T: 101, H: tsdbutil.GenerateTestHistogram(6)}, + {Ref: 101, T: 101, H: tsdbutil.GenerateTestCustomBucketsHistogram(7)}, + }, + []tombstones.Stone{ + {Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}}, + }, + []record.RefExemplar{ + {Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("trace_id", "asdf")}, + }, + } + + head, w := newTestHead(t, 1000, compress, false) + defer func() { + require.NoError(t, head.Close()) + }() + + populateTestWL(t, w, entries) + + require.NoError(t, head.Init(math.MinInt64)) + require.Equal(t, uint64(101), head.lastSeriesID.Load()) + + s10 := head.series.getByID(10) + s11 := head.series.getByID(11) + s50 := head.series.getByID(50) + s100 := head.series.getByID(100) + + testutil.RequireEqual(t, labels.FromStrings("a", "1"), s10.lset) + require.Nil(t, s11) // Series without samples should be garbage collected at head.Init(). 
+ testutil.RequireEqual(t, labels.FromStrings("a", "4"), s50.lset) + testutil.RequireEqual(t, labels.FromStrings("a", "3"), s100.lset) + + expandChunk := func(c chunkenc.Iterator) (x []sample) { + for c.Next() == chunkenc.ValHistogram { + t, v := c.AtHistogram(nil) + //t, v := c.At() + x = append(x, sample{t: t, h: v}) + } + require.NoError(t, c.Err()) + return x + } + + c, _, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool) + require.NoError(t, err) + require.Equal(t, []sample{{100, 0, tsdbutil.GenerateTestCustomBucketsHistogram(2), nil}, {101, 0, tsdbutil.GenerateTestCustomBucketsHistogram(5), nil}}, expandChunk(c.chunk.Iterator(nil))) + c, _, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool) + require.NoError(t, err) + require.Equal(t, []sample{{101, 0, tsdbutil.GenerateTestHistogram(6), nil}}, expandChunk(c.chunk.Iterator(nil))) + // The samples before the new series record should be discarded since a duplicate record + // is only possible when old samples were compacted. 
+ c, _, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool) + require.NoError(t, err) + require.Equal(t, []sample{{101, 0, tsdbutil.GenerateTestCustomBucketsHistogram(7), nil}}, expandChunk(c.chunk.Iterator(nil))) + + q, err := head.ExemplarQuerier(context.Background()) + require.NoError(t, err) + e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")}) + require.NoError(t, err) + require.True(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("trace_id", "asdf")}.Equals(e[0].Exemplars[0])) + }) + } +} + func TestHead_WALMultiRef(t *testing.T) { head, w := newTestHead(t, 1000, wlog.CompressionNone, false) @@ -3953,6 +4036,194 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { testQuery() } +func TestHistogramInWALAndMmapChunk2(t *testing.T) { + head, _ := newTestHead(t, 3000, wlog.CompressionNone, false) + t.Cleanup(func() { + require.NoError(t, head.Close()) + }) + require.NoError(t, head.Init(0)) + + // Series with only histograms. 
+ s1 := labels.FromStrings("a", "b1") + k1 := s1.String() + numHistograms := 300 + exp := map[string][]chunks.Sample{} + ts := int64(0) + var app storage.Appender + for _, custom := range []bool{true, false} { + app = head.Appender(context.Background()) + var hists []*histogram.Histogram + if custom { + hists = tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + } else { + hists = tsdbutil.GenerateTestHistograms(numHistograms) + } + for _, h := range hists { + if !custom { + h.NegativeSpans = h.PositiveSpans + h.NegativeBuckets = h.PositiveBuckets + } + _, err := app.AppendHistogram(0, s1, ts, h, nil) + require.NoError(t, err) + exp[k1] = append(exp[k1], sample{t: ts, h: h.Copy()}) + ts++ + if ts%5 == 0 { + require.NoError(t, app.Commit()) + app = head.Appender(context.Background()) + } + } + require.NoError(t, app.Commit()) + } + for _, custom := range []bool{true, false} { + app = head.Appender(context.Background()) + var hists []*histogram.FloatHistogram + if custom { + hists = tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + } else { + hists = tsdbutil.GenerateTestFloatHistograms(numHistograms) + } + for _, h := range hists { + if !custom { + h.NegativeSpans = h.PositiveSpans + h.NegativeBuckets = h.PositiveBuckets + } + _, err := app.AppendHistogram(0, s1, ts, nil, h) + require.NoError(t, err) + exp[k1] = append(exp[k1], sample{t: ts, fh: h.Copy()}) + ts++ + if ts%5 == 0 { + require.NoError(t, app.Commit()) + app = head.Appender(context.Background()) + } + } + require.NoError(t, app.Commit()) + head.mmapHeadChunks() + } + + // There should be 20 mmap chunks in s1. 
+ ms := head.series.getByHash(s1.Hash(), s1) + require.Len(t, ms.mmappedChunks, 19) + expMmapChunks := make([]*mmappedChunk, 0, 20) + for _, mmap := range ms.mmappedChunks { + require.Positive(t, mmap.numSamples) + cpy := *mmap + expMmapChunks = append(expMmapChunks, &cpy) + } + expHeadChunkSamples := ms.headChunks.chunk.NumSamples() + require.Positive(t, expHeadChunkSamples) + + // Series with mix of histograms and float. + s2 := labels.FromStrings("a", "b2") + k2 := s2.String() + ts = 0 + for _, custom := range []bool{true, false} { + app = head.Appender(context.Background()) + var hists []*histogram.Histogram + if custom { + hists = tsdbutil.GenerateTestCustomBucketsHistograms(100) + } else { + hists = tsdbutil.GenerateTestHistograms(100) + } + for _, h := range hists { + ts++ + if !custom { + h.NegativeSpans = h.PositiveSpans + h.NegativeBuckets = h.PositiveBuckets + } + _, err := app.AppendHistogram(0, s2, ts, h, nil) + require.NoError(t, err) + eh := h.Copy() + if ts > 30 && (ts-10)%20 == 1 { + // Need "unknown" hint after float sample. + eh.CounterResetHint = histogram.UnknownCounterReset + } + exp[k2] = append(exp[k2], sample{t: ts, h: eh}) + if ts%20 == 0 { + require.NoError(t, app.Commit()) + app = head.Appender(context.Background()) + // Add some float. 
+ for i := 0; i < 10; i++ { + ts++ + _, err := app.Append(0, s2, ts, float64(ts)) + require.NoError(t, err) + exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) + } + require.NoError(t, app.Commit()) + app = head.Appender(context.Background()) + } + } + require.NoError(t, app.Commit()) + } + for _, custom := range []bool{true, false} { + app = head.Appender(context.Background()) + var hists []*histogram.FloatHistogram + if custom { + hists = tsdbutil.GenerateTestCustomBucketsFloatHistograms(100) + } else { + hists = tsdbutil.GenerateTestFloatHistograms(100) + } + for _, h := range hists { + ts++ + if !custom { + h.NegativeSpans = h.PositiveSpans + h.NegativeBuckets = h.PositiveBuckets + } + _, err := app.AppendHistogram(0, s2, ts, nil, h) + require.NoError(t, err) + eh := h.Copy() + if ts > 30 && (ts-10)%20 == 1 { + // Need "unknown" hint after float sample. + eh.CounterResetHint = histogram.UnknownCounterReset + } + exp[k2] = append(exp[k2], sample{t: ts, fh: eh}) + if ts%20 == 0 { + require.NoError(t, app.Commit()) + app = head.Appender(context.Background()) + // Add some float. + for i := 0; i < 10; i++ { + ts++ + _, err := app.Append(0, s2, ts, float64(ts)) + require.NoError(t, err) + exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) + } + require.NoError(t, app.Commit()) + app = head.Appender(context.Background()) + } + } + require.NoError(t, app.Commit()) + } + + // Restart head. + require.NoError(t, head.Close()) + startHead := func() { + w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) + require.NoError(t, err) + head, err = NewHead(nil, nil, w, nil, head.opts, nil) + require.NoError(t, err) + require.NoError(t, head.Init(0)) + } + startHead() + + // Checking contents of s1. 
+ ms = head.series.getByHash(s1.Hash(), s1) + require.Equal(t, expMmapChunks, ms.mmappedChunks) + require.Equal(t, expHeadChunkSamples, ms.headChunks.chunk.NumSamples()) + + testQuery := func() { + q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime()) + require.NoError(t, err) + act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*")) + compareSeries(t, exp, act) + } + testQuery() + + // Restart with no mmap chunks to test WAL replay. + require.NoError(t, head.Close()) + require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot))) + startHead() + testQuery() +} + func TestChunkSnapshot(t *testing.T) { head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) defer func() { @@ -5089,6 +5360,48 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { require.Positive(t, offset) } +func TestHistogramWALANDWBLReplay(t *testing.T) { + dir := t.TempDir() + wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) + require.NoError(t, err) + oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) + require.NoError(t, err) + + opts := DefaultHeadOptions() + opts.ChunkRange = 1000 + opts.ChunkDirRoot = dir + opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds()) + opts.EnableNativeHistograms.Store(true) + opts.EnableOOONativeHistograms.Store(true) + + h, err := NewHead(nil, nil, wal, oooWlog, opts, nil) + require.NoError(t, err) + require.NoError(t, h.Init(0)) + + var expOOOSamples []chunks.Sample + l := labels.FromStrings("foo", "bar") + appendSample := func(mins int64, val float64, isOOO bool, isCustomBucketHistogram bool) { + app := h.Appender(context.Background()) + var s sample + if isCustomBucketHistogram { + s = sample{t: mins * time.Minute.Milliseconds(), h: tsdbutil.GenerateTestCustomBucketsHistogram(int(val))} + } else { + s = sample{t: mins * time.Minute.Milliseconds(), h: 
tsdbutil.GenerateTestHistogram(int(val))} + } + _, err := app.AppendHistogram(0, l, mins*time.Minute.Milliseconds(), s.h, nil) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + if isOOO { + expOOOSamples = append(expOOOSamples, s) + } + } + + // In-order histogram samples. + appendSample(60, 60, false, false) + +} + // TestWBLReplay checks the replay at a low level. func TestWBLReplay(t *testing.T) { for name, scenario := range sampleTypeScenarios { diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index dd4f4f8b17..9d1e24b706 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -139,7 +139,8 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch dec := record.NewDecoder(syms) for r.Next() { rec := r.Record() - switch dec.Type(rec) { + recType := dec.Type(rec) + switch recType { case record.Series: series := seriesPool.Get()[:0] series, err = dec.Series(rec, series) @@ -188,7 +189,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- exemplars - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketHistogramSamples: hists := histogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -200,7 +201,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- hists - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: hists := floatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 83adecdbb4..15f5053d53 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -52,6 +52,10 @@ const ( HistogramSamples Type = 7 // FloatHistogramSamples is used to match WAL records of type Float Histograms. 
FloatHistogramSamples Type = 8 + // CustomBucketHistogramSamples is used to match WAL records of type Histogram with custom buckets. + CustomBucketHistogramSamples Type = 9 + // CustomBucketFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets. + CustomBucketFloatHistogramSamples Type = 10 ) func (rt Type) String() string { @@ -68,6 +72,10 @@ func (rt Type) String() string { return "histogram_samples" case FloatHistogramSamples: return "float_histogram_samples" + case CustomBucketHistogramSamples: + return "custom_bucket_histogram_samples" + case CustomBucketFloatHistogramSamples: + return "custom_bucket_float_histogram_samples" case MmapMarkers: return "mmapmarkers" case Metadata: @@ -185,6 +193,10 @@ type RefFloatHistogramSample struct { FH *histogram.FloatHistogram } +type RefCustomBucketHistogramSample struct { + RefHistogramSample +} + // RefMmapMarker marks that the all the samples of the given series until now have been m-mapped to disk. type RefMmapMarker struct { Ref chunks.HeadSeriesRef @@ -207,7 +219,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketHistogramSamples, CustomBucketFloatHistogramSamples: return t } return Unknown @@ -428,7 +440,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != HistogramSamples { + if t != HistogramSamples && t != CustomBucketHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -509,12 +521,10 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { if 
histogram.IsCustomBucketsSchema(h.Schema) { l = buf.Uvarint() if l > 0 { - if l > 0 { - h.CustomValues = make([]float64, l) - } - for i := range h.CustomValues { - h.CustomValues[i] = buf.Be64Float64() - } + h.CustomValues = make([]float64, l) + } + for i := range h.CustomValues { + h.CustomValues[i] = buf.Be64Float64() } } } @@ -522,7 +532,7 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != FloatHistogramSamples { + if t != FloatHistogramSamples && t != CustomBucketFloatHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -603,12 +613,10 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { if histogram.IsCustomBucketsSchema(fh.Schema) { l = buf.Uvarint() if l > 0 { - if l > 0 { - fh.CustomValues = make([]float64, l) - } - for i := range fh.CustomValues { - fh.CustomValues[i] = buf.Be64Float64() - } + fh.CustomValues = make([]float64, l) + } + for i := range fh.CustomValues { + fh.CustomValues[i] = buf.Be64Float64() } } } @@ -740,12 +748,15 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte { +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []byte) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) + customBucketHistBuf := encoding.Encbuf{B: b} + customBucketHistBuf.PutByte(byte(CustomBucketHistogramSamples)) + if len(histograms) == 0 { - return buf.Get() + return buf.Get(), customBucketHistBuf.Get() } // Store base timestamp and base reference number of first histogram. 
@@ -754,14 +765,34 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) [] buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) - for _, h := range histograms { - buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - buf.PutVarint64(h.T - first.T) + customBucketHistBuf.PutBE64(uint64(first.Ref)) + customBucketHistBuf.PutBE64int64(first.T) - EncodeHistogram(&buf, h.H) + histsAdded := 0 + customBucketHistsAdded := 0 + for _, h := range histograms { + if h.H.UsesCustomBuckets() { + customBucketHistBuf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + customBucketHistBuf.PutVarint64(h.T - first.T) + + EncodeHistogram(&customBucketHistBuf, h.H) + customBucketHistsAdded++ + } else { + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeHistogram(&buf, h.H) + histsAdded++ + } } - return buf.Get() + if customBucketHistsAdded == 0 { + customBucketHistBuf.Reset() + } else if histsAdded == 0 { + buf.Reset() + } + + return buf.Get(), customBucketHistBuf.Get() } // EncodeHistogram encodes a Histogram into a byte slice. @@ -805,12 +836,15 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { } } -func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { +func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []byte) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) + customBucketHistBuf := encoding.Encbuf{B: b} + customBucketHistBuf.PutByte(byte(CustomBucketFloatHistogramSamples)) + if len(histograms) == 0 { - return buf.Get() + return buf.Get(), customBucketHistBuf.Get() } // Store base timestamp and base reference number of first histogram. 
@@ -819,14 +853,34 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) - for _, h := range histograms { - buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - buf.PutVarint64(h.T - first.T) + customBucketHistBuf.PutBE64(uint64(first.Ref)) + customBucketHistBuf.PutBE64int64(first.T) - EncodeFloatHistogram(&buf, h.FH) + histsAdded := 0 + customBucketHistsAdded := 0 + for _, h := range histograms { + if h.FH.UsesCustomBuckets() { + customBucketHistBuf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + customBucketHistBuf.PutVarint64(h.T - first.T) + + EncodeFloatHistogram(&customBucketHistBuf, h.FH) + customBucketHistsAdded++ + } else { + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeFloatHistogram(&buf, h.FH) + histsAdded++ + } } - return buf.Get() + if customBucketHistsAdded == 0 { + customBucketHistBuf.Reset() + } else if histsAdded == 0 { + buf.Reset() + } + + return buf.Get(), customBucketHistBuf.Get() } // EncodeFloatHistogram encodes the Float Histogram into a byte slice. 
diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index f3a657aecb..67c7eab970 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -148,10 +148,30 @@ func TestRecord_EncodeDecode(t *testing.T) { NegativeBuckets: []int64{1, 2, -1}, }, }, + { + Ref: 67, + T: 5678, + H: &histogram.Histogram{ + Count: 8, + ZeroThreshold: 0.001, + Sum: 35.5, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []int64{2, -1, 2, 0}, + CustomValues: []float64{0, 2, 4, 6, 8}, + }, + }, } - decHistograms, err := dec.HistogramSamples(enc.HistogramSamples(histograms, nil), nil) + histSamples, customBucketHistSamples := enc.HistogramSamples(histograms, nil) + decHistograms, err := dec.HistogramSamples(histSamples, nil) require.NoError(t, err) + decCustomBucketHistograms, err := dec.HistogramSamples(customBucketHistSamples, nil) + require.NoError(t, err) + decHistograms = append(decHistograms, decCustomBucketHistograms...) require.Equal(t, histograms, decHistograms) floatHistograms := make([]RefFloatHistogramSample, len(histograms)) @@ -162,24 +182,36 @@ func TestRecord_EncodeDecode(t *testing.T) { FH: h.H.ToFloat(nil), } } - decFloatHistograms, err := dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil) + histSamples, customBucketFloatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) + decFloatHistograms, err := dec.FloatHistogramSamples(histSamples, nil) require.NoError(t, err) + decCustomBucketFloatHistograms, err := dec.FloatHistogramSamples(customBucketFloatHistSamples, nil) + decFloatHistograms = append(decFloatHistograms, decCustomBucketFloatHistograms...) require.Equal(t, floatHistograms, decFloatHistograms) // Gauge integer histograms. 
for i := range histograms { histograms[i].H.CounterResetHint = histogram.GaugeType } - decHistograms, err = dec.HistogramSamples(enc.HistogramSamples(histograms, nil), nil) + + gaugeHistSamples, customBucketGaugeHistSamples := enc.HistogramSamples(histograms, nil) + decGaugeHistograms, err := dec.HistogramSamples(gaugeHistSamples, nil) require.NoError(t, err) - require.Equal(t, histograms, decHistograms) + decCustomBucketGaugeHistograms, err := dec.HistogramSamples(customBucketGaugeHistSamples, nil) + require.NoError(t, err) + decGaugeHistograms = append(decGaugeHistograms, decCustomBucketGaugeHistograms...) + require.Equal(t, histograms, decGaugeHistograms) // Gauge float histograms. for i := range floatHistograms { floatHistograms[i].FH.CounterResetHint = histogram.GaugeType } - decFloatHistograms, err = dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil) + + gaugeHistSamples, customBucketGaugeFloatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) + decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeHistSamples, nil) require.NoError(t, err) + decCustomBucketGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketGaugeFloatHistSamples, nil) + decFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketGaugeFloatHistograms...) 
require.Equal(t, floatHistograms, decFloatHistograms) } @@ -265,8 +297,12 @@ func TestRecord_Corrupted(t *testing.T) { }, } - corrupted := enc.HistogramSamples(histograms, nil)[:8] - _, err := dec.HistogramSamples(corrupted, nil) + corruptedHists, corruptedCustomBucketHists := enc.HistogramSamples(histograms, nil) + corruptedHists = corruptedHists[:8] + corruptedCustomBucketHists = corruptedCustomBucketHists[:8] + _, err := dec.HistogramSamples(corruptedHists, nil) + require.ErrorIs(t, err, encoding.ErrInvalidSize) + _, err = dec.HistogramSamples(corruptedCustomBucketHists, nil) require.ErrorIs(t, err, encoding.ErrInvalidSize) }) } @@ -308,9 +344,28 @@ func TestRecord_Type(t *testing.T) { PositiveBuckets: []int64{1, 1, -1, 0}, }, }, + { + Ref: 67, + T: 5678, + H: &histogram.Histogram{ + Count: 8, + ZeroThreshold: 0.001, + Sum: 35.5, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []int64{2, -1, 2, 0}, + CustomValues: []float64{0, 2, 4, 6, 8}, + }, + }, } - recordType = dec.Type(enc.HistogramSamples(histograms, nil)) + hists, customBucketHists := enc.HistogramSamples(histograms, nil) + recordType = dec.Type(hists) require.Equal(t, HistogramSamples, recordType) + recordType = dec.Type(customBucketHists) + require.Equal(t, CustomBucketHistogramSamples, recordType) recordType = dec.Type(nil) require.Equal(t, Unknown, recordType) diff --git a/tsdb/testutil.go b/tsdb/testutil.go index c39eb133c7..a13d89186e 100644 --- a/tsdb/testutil.go +++ b/tsdb/testutil.go @@ -29,11 +29,13 @@ import ( ) const ( - float = "float" - intHistogram = "integer histogram" - floatHistogram = "float histogram" - gaugeIntHistogram = "gauge int histogram" - gaugeFloatHistogram = "gauge float histogram" + float = "float" + intHistogram = "integer histogram" + floatHistogram = "float histogram" + customBucketIntHistogram = "custom bucket int histogram" + customBucketFloatHistogram = "custom bucket float histogram" + 
gaugeIntHistogram = "gauge int histogram" + gaugeFloatHistogram = "gauge float histogram" ) type testValue struct { @@ -82,6 +84,28 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{ return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} }, }, + customBucketIntHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(int(value))} + ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(int(value))} + }, + }, + customBucketFloatHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(value))} + ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(value))} + }, + }, gaugeIntHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index ce934a638d..4b6cebd579 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -57,6 +57,18 @@ func GenerateTestHistogram(i int) *histogram.Histogram { } } +func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) { + for i := 0; i < n; i++ { + h := GenerateTestCustomBucketsHistogram(i) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + + } + return r +} + func 
GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram { return &histogram.Histogram{ Count: 5 + uint64(i*4), @@ -117,6 +129,17 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { } } +func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) { + for i := 0; i < n; i++ { + h := GenerateTestCustomBucketsFloatHistogram(i) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 5 + float64(i*4), diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 58e11c770e..cd82676da9 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -221,11 +221,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.HistogramSamples(repl, buf) + buf, _ = enc.HistogramSamples(repl, buf) + } + stats.TotalSamples += len(histogramSamples) + stats.DroppedSamples += len(histogramSamples) - len(repl) + case record.CustomBucketHistogramSamples: + histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) + if err != nil { + return nil, fmt.Errorf("decode histogram samples: %w", err) + } + // Drop irrelevant histogramSamples in place. 
+ repl := histogramSamples[:0] + for _, h := range histogramSamples { + if h.T >= mint { + repl = append(repl, h) + } + } + if len(repl) > 0 { + _, buf = enc.HistogramSamples(repl, buf) } stats.TotalSamples += len(histogramSamples) stats.DroppedSamples += len(histogramSamples) - len(repl) - case record.FloatHistogramSamples: floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) if err != nil { @@ -239,11 +255,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.FloatHistogramSamples(repl, buf) + buf, _ = enc.FloatHistogramSamples(repl, buf) + } + stats.TotalSamples += len(floatHistogramSamples) + stats.DroppedSamples += len(floatHistogramSamples) - len(repl) + case record.CustomBucketFloatHistogramSamples: + floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) + if err != nil { + return nil, fmt.Errorf("decode float histogram samples: %w", err) + } + // Drop irrelevant floatHistogramSamples in place. 
+ repl := floatHistogramSamples[:0] + for _, fh := range floatHistogramSamples { + if fh.T >= mint { + repl = append(repl, fh) + } + } + if len(repl) > 0 { + _, buf = enc.FloatHistogramSamples(repl, buf) } stats.TotalSamples += len(floatHistogramSamples) stats.DroppedSamples += len(floatHistogramSamples) - len(repl) - case record.Tombstones: tstones, err = dec.Tombstones(rec, tstones) if err != nil { diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 8ee193f5ac..a5692a9aa4 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -208,22 +208,24 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) samplesInWAL += 4 h := makeHistogram(i) - b = enc.HistogramSamples([]record.RefHistogramSample{ + b1, b2 := enc.HistogramSamples([]record.RefHistogramSample{ {Ref: 0, T: last, H: h}, {Ref: 1, T: last + 10000, H: h}, {Ref: 2, T: last + 20000, H: h}, {Ref: 3, T: last + 30000, H: h}, }, nil) - require.NoError(t, w.Log(b)) + require.NoError(t, w.Log(b1)) + require.NoError(t, w.Log(b2)) histogramsInWAL += 4 fh := makeFloatHistogram(i) - b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ + b1, b2 = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ {Ref: 0, T: last, FH: fh}, {Ref: 1, T: last + 10000, FH: fh}, {Ref: 2, T: last + 20000, FH: fh}, {Ref: 3, T: last + 30000, FH: fh}, }, nil) - require.NoError(t, w.Log(b)) + require.NoError(t, w.Log(b1)) + require.NoError(t, w.Log(b2)) floatHistogramsInWAL += 4 b = enc.Exemplars([]record.RefExemplar{ diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 398b0f4414..c2499a7cec 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -209,19 +209,21 @@ func TestTailSamples(t *testing.T) { NegativeBuckets: []int64{int64(-i) - 1}, } - histogram := enc.HistogramSamples([]record.RefHistogramSample{{ + histogram, customBucketHistogram := enc.HistogramSamples([]record.RefHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), 
T: now.UnixNano() + 1, H: hist, }}, nil) require.NoError(t, w.Log(histogram)) + require.NoError(t, w.Log(customBucketHistogram)) - floatHistogram := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ + floatHistogram, floatCustomBucketHistogram := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, FH: hist.ToFloat(nil), }}, nil) require.NoError(t, w.Log(floatHistogram)) + require.NoError(t, w.Log(floatCustomBucketHistogram)) } } From 454f6d39ca5748aed08b110ad3f2eb027bff8969 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Thu, 14 Nov 2024 10:00:55 -0800 Subject: [PATCH 1035/1397] Add separate handling for histograms and custom bucket histograms --- tsdb/agent/db.go | 43 ++++++------- tsdb/agent/db_test.go | 30 ++++++++- tsdb/head_append.go | 49 ++++++--------- tsdb/record/record.go | 114 ++++++++++++++++++++--------------- tsdb/record/record_test.go | 45 ++++++++++---- tsdb/wlog/checkpoint.go | 4 +- tsdb/wlog/checkpoint_test.go | 60 +++++++++++++++--- tsdb/wlog/watcher.go | 4 +- tsdb/wlog/watcher_test.go | 36 ++++++++--- 9 files changed, 246 insertions(+), 139 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 5067edc3ae..bcfc7be129 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -1154,40 +1154,35 @@ func (a *appender) log() error { } if len(a.pendingHistograms) > 0 { - buf1, buf2 := encoder.HistogramSamples(a.pendingHistograms, buf) - //buf = append(buf1, buf2...) 
- //if err := a.wal.Log(buf); err != nil { - // return err - //} - if len(buf1) > 0 { - buf = buf1[:0] - if err := a.wal.Log(buf1); err != nil { - return err - } + var customBucketsExist bool + buf, customBucketsExist = encoder.HistogramSamples(a.pendingHistograms, buf) + if err := a.wal.Log(buf); err != nil { + return err } - if len(buf2) > 0 { - buf = buf2[:0] - if err := a.wal.Log(buf2); err != nil { + buf = buf[:0] + if customBucketsExist { + buf = encoder.CustomBucketHistogramSamples(a.pendingHistograms, buf) + if err := a.wal.Log(buf); err != nil { return err } + buf = buf[:0] } } if len(a.pendingFloatHistograms) > 0 { - buf1, buf2 := encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) - if len(buf1) > 0 { - buf = buf1[:0] - if err := a.wal.Log(buf1); err != nil { + var customBucketsExist bool + buf, customBucketsExist = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + if customBucketsExist { + buf = encoder.CustomBucketFloatHistogramSamples(a.pendingFloatHistograms, buf) + if err := a.wal.Log(buf); err != nil { return err } + buf = buf[:0] } - if len(buf2) > 0 { - buf = buf2[:0] - if err := a.wal.Log(buf2); err != nil { - return err - } - } - //buf = buf[:0] } if len(a.pendingExamplars) > 0 { diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 5332c61cdb..6b5d9ece05 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -163,6 +163,18 @@ func TestCommit(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_bucket_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) 
+ + customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), customBucketHistograms[i], nil) + require.NoError(t, err) + } + } + lbls = labelsForTest(t.Name()+"_float_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -175,6 +187,18 @@ func TestCommit(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"custom_bucket_float_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), nil, customBucketFloatHistograms[i]) + require.NoError(t, err) + } + } + require.NoError(t, app.Commit()) require.NoError(t, s.Close()) @@ -230,11 +254,11 @@ func TestCommit(t *testing.T) { } // Check that the WAL contained the same number of committed series/samples/exemplars. 
- require.Equal(t, numSeries*3, walSeriesCount, "unexpected number of series") + require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series") require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples") require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars") - require.Equal(t, numSeries*numHistograms, walHistogramCount, "unexpected number of histograms") - require.Equal(t, numSeries*numHistograms, walFloatHistogramCount, "unexpected number of float histograms") + require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms") + require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms") } func TestRollback(t *testing.T) { diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 3701a57135..78b256fee3 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -942,45 +942,30 @@ func (a *headAppender) log() error { } } if len(a.histograms) > 0 { - rec1, rec2 := enc.HistogramSamples(a.histograms, buf) - //rec = append(rec1, rec2...) - // - //buf = rec[:0] - // - //if err := a.head.wal.Log(rec); err != nil { - // return fmt.Errorf("log samples: %w", err) - //} - if len(rec1) != 0 { - buf = rec1[:0] - if err := a.head.wal.Log(rec1); err != nil { - return fmt.Errorf("log histograms: %w", err) - } + rec, customBucketsExist := enc.HistogramSamples(a.histograms, buf) + buf = rec[:0] + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log histograms: %w", err) } - if len(rec2) != 0 { - buf = rec2[:0] - if err := a.head.wal.Log(rec2); err != nil { + + if customBucketsExist { + enc.CustomBucketHistogramSamples(a.histograms, buf) + buf = rec[:0] + if err := a.head.wal.Log(rec); err != nil { return fmt.Errorf("log custom bucket histograms: %w", err) } } } if len(a.floatHistograms) > 0 { - rec1, rec2 := enc.FloatHistogramSamples(a.floatHistograms, buf) - //rec = append(rec1, rec2...) 
- // - //buf = rec[:0] - // - //if err := a.head.wal.Log(rec); err != nil { - // return fmt.Errorf("log samples: %w", err) - //} - if len(rec1) != 0 { - buf = rec1[:0] - if err := a.head.wal.Log(rec1); err != nil { - return fmt.Errorf("log float histograms: %w", err) - } + rec, customBucketsExist := enc.FloatHistogramSamples(a.floatHistograms, buf) + buf = rec[:0] + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log float histograms: %w", err) } - if len(rec2) != 0 { - buf = rec2[:0] - if err := a.head.wal.Log(rec2); err != nil { + + if customBucketsExist { + buf = rec[:0] + if err := a.head.wal.Log(rec); err != nil { return fmt.Errorf("log custom bucket float histograms: %w", err) } } diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 15f5053d53..2b19cdbb6f 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -193,10 +193,6 @@ type RefFloatHistogramSample struct { FH *histogram.FloatHistogram } -type RefCustomBucketHistogramSample struct { - RefHistogramSample -} - // RefMmapMarker marks that the all the samples of the given series until now have been m-mapped to disk. type RefMmapMarker struct { Ref chunks.HeadSeriesRef @@ -748,15 +744,12 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []byte) { +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, bool) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) - customBucketHistBuf := encoding.Encbuf{B: b} - customBucketHistBuf.PutByte(byte(CustomBucketHistogramSamples)) - if len(histograms) == 0 { - return buf.Get(), customBucketHistBuf.Get() + return buf.Get(), false } // Store base timestamp and base reference number of first histogram. 
@@ -765,34 +758,46 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([ buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) - customBucketHistBuf.PutBE64(uint64(first.Ref)) - customBucketHistBuf.PutBE64int64(first.T) - - histsAdded := 0 - customBucketHistsAdded := 0 + customBucketSamplesExist := false for _, h := range histograms { if h.H.UsesCustomBuckets() { - customBucketHistBuf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - customBucketHistBuf.PutVarint64(h.T - first.T) + customBucketSamplesExist = true + continue + } - EncodeHistogram(&customBucketHistBuf, h.H) - customBucketHistsAdded++ - } else { + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeHistogram(&buf, h.H) + } + + return buf.Get(), customBucketSamplesExist +} + +func (e *Encoder) CustomBucketHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketHistogramSamples)) + + if len(histograms) == 0 { + return buf.Get() + } + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. + first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.H.UsesCustomBuckets() { buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(h.T - first.T) EncodeHistogram(&buf, h.H) - histsAdded++ } } - if customBucketHistsAdded == 0 { - customBucketHistBuf.Reset() - } else if histsAdded == 0 { - buf.Reset() - } - - return buf.Get(), customBucketHistBuf.Get() + return buf.Get() } // EncodeHistogram encodes a Histogram into a byte slice. 
@@ -836,15 +841,12 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { } } -func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []byte) { +func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, bool) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) - customBucketHistBuf := encoding.Encbuf{B: b} - customBucketHistBuf.PutByte(byte(CustomBucketFloatHistogramSamples)) - if len(histograms) == 0 { - return buf.Get(), customBucketHistBuf.Get() + return buf.Get(), false } // Store base timestamp and base reference number of first histogram. @@ -853,34 +855,46 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b buf.PutBE64(uint64(first.Ref)) buf.PutBE64int64(first.T) - customBucketHistBuf.PutBE64(uint64(first.Ref)) - customBucketHistBuf.PutBE64int64(first.T) - - histsAdded := 0 - customBucketHistsAdded := 0 + customBucketsExist := false for _, h := range histograms { if h.FH.UsesCustomBuckets() { - customBucketHistBuf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - customBucketHistBuf.PutVarint64(h.T - first.T) + customBucketsExist = true + continue + } - EncodeFloatHistogram(&customBucketHistBuf, h.FH) - customBucketHistsAdded++ - } else { + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeFloatHistogram(&buf, h.FH) + } + + return buf.Get(), customBucketsExist +} + +func (e *Encoder) CustomBucketFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketFloatHistogramSamples)) + + if len(histograms) == 0 { + return buf.Get() + } + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. 
+ first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.FH.UsesCustomBuckets() { buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) buf.PutVarint64(h.T - first.T) EncodeFloatHistogram(&buf, h.FH) - histsAdded++ } } - if customBucketHistsAdded == 0 { - customBucketHistBuf.Reset() - } else if histsAdded == 0 { - buf.Reset() - } - - return buf.Get(), customBucketHistBuf.Get() + return buf.Get() } // EncodeFloatHistogram encodes the Float Histogram into a byte slice. diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 67c7eab970..af94f2b207 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -166,12 +166,12 @@ func TestRecord_EncodeDecode(t *testing.T) { }, } - histSamples, customBucketHistSamples := enc.HistogramSamples(histograms, nil) + histSamples, _ := enc.HistogramSamples(histograms, nil) + customBucketHistSamples := enc.CustomBucketHistogramSamples(histograms, nil) decHistograms, err := dec.HistogramSamples(histSamples, nil) require.NoError(t, err) - decCustomBucketHistograms, err := dec.HistogramSamples(customBucketHistSamples, nil) - require.NoError(t, err) - decHistograms = append(decHistograms, decCustomBucketHistograms...) + decCustomBucketHistSamples, err := dec.HistogramSamples(customBucketHistSamples, nil) + decHistograms = append(decHistograms, decCustomBucketHistSamples...) 
require.Equal(t, histograms, decHistograms) floatHistograms := make([]RefFloatHistogramSample, len(histograms)) @@ -182,10 +182,12 @@ func TestRecord_EncodeDecode(t *testing.T) { FH: h.H.ToFloat(nil), } } - histSamples, customBucketFloatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) - decFloatHistograms, err := dec.FloatHistogramSamples(histSamples, nil) + floatHistSamples, _ := enc.FloatHistogramSamples(floatHistograms, nil) + customBucketFloatHistSamples := enc.CustomBucketFloatHistogramSamples(floatHistograms, nil) + decFloatHistograms, err := dec.FloatHistogramSamples(floatHistSamples, nil) require.NoError(t, err) decCustomBucketFloatHistograms, err := dec.FloatHistogramSamples(customBucketFloatHistSamples, nil) + require.NoError(t, err) decFloatHistograms = append(decFloatHistograms, decCustomBucketFloatHistograms...) require.Equal(t, floatHistograms, decFloatHistograms) @@ -194,7 +196,8 @@ func TestRecord_EncodeDecode(t *testing.T) { histograms[i].H.CounterResetHint = histogram.GaugeType } - gaugeHistSamples, customBucketGaugeHistSamples := enc.HistogramSamples(histograms, nil) + gaugeHistSamples, _ := enc.HistogramSamples(histograms, nil) + customBucketGaugeHistSamples := enc.CustomBucketHistogramSamples(histograms, nil) decGaugeHistograms, err := dec.HistogramSamples(gaugeHistSamples, nil) require.NoError(t, err) decCustomBucketGaugeHistograms, err := dec.HistogramSamples(customBucketGaugeHistSamples, nil) @@ -207,10 +210,12 @@ func TestRecord_EncodeDecode(t *testing.T) { floatHistograms[i].FH.CounterResetHint = histogram.GaugeType } - gaugeHistSamples, customBucketGaugeFloatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) - decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeHistSamples, nil) + gaugeFloatHistSamples, _ := enc.FloatHistogramSamples(floatHistograms, nil) + customBucketGaugeFloatHistSamples := enc.CustomBucketFloatHistogramSamples(floatHistograms, nil) + decGaugeFloatHistograms, err := 
dec.FloatHistogramSamples(gaugeFloatHistSamples, nil) require.NoError(t, err) decCustomBucketGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketGaugeFloatHistSamples, nil) + require.NoError(t, err) decFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketGaugeFloatHistograms...) require.Equal(t, floatHistograms, decFloatHistograms) } @@ -295,10 +300,27 @@ func TestRecord_Corrupted(t *testing.T) { PositiveBuckets: []int64{1, 1, -1, 0}, }, }, + { + Ref: 67, + T: 5678, + H: &histogram.Histogram{ + Count: 8, + ZeroThreshold: 0.001, + Sum: 35.5, + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 2}, + }, + PositiveBuckets: []int64{2, -1, 2, 0}, + CustomValues: []float64{0, 2, 4, 6, 8}, + }, + }, } - corruptedHists, corruptedCustomBucketHists := enc.HistogramSamples(histograms, nil) + corruptedHists, _ := enc.HistogramSamples(histograms, nil) corruptedHists = corruptedHists[:8] + corruptedCustomBucketHists := enc.CustomBucketHistogramSamples(histograms, nil) corruptedCustomBucketHists = corruptedCustomBucketHists[:8] _, err := dec.HistogramSamples(corruptedHists, nil) require.ErrorIs(t, err, encoding.ErrInvalidSize) @@ -361,9 +383,10 @@ func TestRecord_Type(t *testing.T) { }, }, } - hists, customBucketHists := enc.HistogramSamples(histograms, nil) + hists, _ := enc.HistogramSamples(histograms, nil) recordType = dec.Type(hists) require.Equal(t, HistogramSamples, recordType) + customBucketHists := enc.CustomBucketHistogramSamples(histograms, nil) recordType = dec.Type(customBucketHists) require.Equal(t, CustomBucketHistogramSamples, recordType) diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index cd82676da9..5bb79595b9 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -238,7 +238,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - _, buf = enc.HistogramSamples(repl, buf) + buf = 
enc.CustomBucketHistogramSamples(repl, buf) } stats.TotalSamples += len(histogramSamples) stats.DroppedSamples += len(histogramSamples) - len(repl) @@ -272,7 +272,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - _, buf = enc.FloatHistogramSamples(repl, buf) + buf = enc.CustomBucketFloatHistogramSamples(repl, buf) } stats.TotalSamples += len(floatHistogramSamples) stats.DroppedSamples += len(floatHistogramSamples) - len(repl) diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index a5692a9aa4..f947f28095 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -127,6 +127,20 @@ func TestCheckpoint(t *testing.T) { PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, } } + makeCustomBucketHistogram := func(i int) *histogram.Histogram { + return &histogram.Histogram{ + Count: 5 + uint64(i*4), + ZeroCount: 2 + uint64(i), + ZeroThreshold: 0.001, + Sum: 18.4 * float64(i+1), + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + CustomValues: []float64{0, 1, 2, 3, 4}, + } + } makeFloatHistogram := func(i int) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 5 + float64(i*4), @@ -141,6 +155,20 @@ func TestCheckpoint(t *testing.T) { PositiveBuckets: []float64{float64(i + 1), 1, -1, 0}, } } + makeCustomBucketFloatHistogram := func(i int) *histogram.FloatHistogram { + return &histogram.FloatHistogram{ + Count: 5 + float64(i*4), + ZeroCount: 2 + float64(i), + ZeroThreshold: 0.001, + Sum: 18.4 * float64(i+1), + Schema: -53, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + CustomValues: []float64{0, 1, 2, 3, 4}, + } + } for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { @@ -208,24 +236,40 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, 
w.Log(b)) samplesInWAL += 4 h := makeHistogram(i) - b1, b2 := enc.HistogramSamples([]record.RefHistogramSample{ + b, _ = enc.HistogramSamples([]record.RefHistogramSample{ {Ref: 0, T: last, H: h}, {Ref: 1, T: last + 10000, H: h}, {Ref: 2, T: last + 20000, H: h}, {Ref: 3, T: last + 30000, H: h}, }, nil) - require.NoError(t, w.Log(b1)) - require.NoError(t, w.Log(b2)) + require.NoError(t, w.Log(b)) + histogramsInWAL += 4 + cbh := makeCustomBucketHistogram(i) + b = enc.CustomBucketHistogramSamples([]record.RefHistogramSample{ + {Ref: 0, T: last, H: cbh}, + {Ref: 1, T: last + 10000, H: cbh}, + {Ref: 2, T: last + 20000, H: cbh}, + {Ref: 3, T: last + 30000, H: cbh}, + }, nil) + require.NoError(t, w.Log(b)) histogramsInWAL += 4 fh := makeFloatHistogram(i) - b1, b2 = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ + b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ {Ref: 0, T: last, FH: fh}, {Ref: 1, T: last + 10000, FH: fh}, {Ref: 2, T: last + 20000, FH: fh}, {Ref: 3, T: last + 30000, FH: fh}, }, nil) - require.NoError(t, w.Log(b1)) - require.NoError(t, w.Log(b2)) + require.NoError(t, w.Log(b)) + floatHistogramsInWAL += 4 + cbfh := makeCustomBucketFloatHistogram(i) + b = enc.CustomBucketFloatHistogramSamples([]record.RefFloatHistogramSample{ + {Ref: 0, T: last, FH: cbfh}, + {Ref: 1, T: last + 10000, FH: cbfh}, + {Ref: 2, T: last + 20000, FH: cbfh}, + {Ref: 3, T: last + 30000, FH: cbfh}, + }, nil) + require.NoError(t, w.Log(b)) floatHistogramsInWAL += 4 b = enc.Exemplars([]record.RefExemplar{ @@ -286,14 +330,14 @@ func TestCheckpoint(t *testing.T) { require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp") } samplesInCheckpoint += len(samples) - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketHistogramSamples: histograms, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) for _, h := range histograms { require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp") } 
histogramsInCheckpoint += len(histograms) - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: floatHistograms, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) for _, h := range floatHistograms { diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 89db5d2dd7..169bd296fe 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -546,7 +546,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } w.writer.AppendExemplars(exemplars) - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { break @@ -574,7 +574,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { histogramsToSend = histogramsToSend[:0] } - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. 
if !w.sendHistograms { break diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index c2499a7cec..5ff70bb215 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -209,21 +209,43 @@ func TestTailSamples(t *testing.T) { NegativeBuckets: []int64{int64(-i) - 1}, } - histogram, customBucketHistogram := enc.HistogramSamples([]record.RefHistogramSample{{ + histograms, _ := enc.HistogramSamples([]record.RefHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, H: hist, }}, nil) - require.NoError(t, w.Log(histogram)) - require.NoError(t, w.Log(customBucketHistogram)) + require.NoError(t, w.Log(histograms)) - floatHistogram, floatCustomBucketHistogram := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ + customBucketHist := &histogram.Histogram{ + Schema: -53, + ZeroThreshold: 1e-128, + ZeroCount: 0, + Count: 2, + Sum: 0, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + CustomValues: []float64{float64(i) + 2}, + } + + customBucketHistograms := enc.CustomBucketHistogramSamples([]record.RefHistogramSample{{ + Ref: chunks.HeadSeriesRef(inner), + T: now.UnixNano() + 1, + H: customBucketHist, + }}, nil) + require.NoError(t, w.Log(customBucketHistograms)) + + floatHistograms, _ := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, FH: hist.ToFloat(nil), }}, nil) - require.NoError(t, w.Log(floatHistogram)) - require.NoError(t, w.Log(floatCustomBucketHistogram)) + require.NoError(t, w.Log(floatHistograms)) + + customBucketFloatHistograms := enc.CustomBucketFloatHistogramSamples([]record.RefFloatHistogramSample{{ + Ref: chunks.HeadSeriesRef(inner), + T: now.UnixNano() + 1, + FH: customBucketHist.ToFloat(nil), + }}, nil) + require.NoError(t, w.Log(customBucketFloatHistograms)) } } @@ -250,7 +272,7 @@ func TestTailSamples(t *testing.T) { expectedSeries := seriesCount expectedSamples := seriesCount * samplesCount expectedExemplars := 
seriesCount * exemplarsCount - expectedHistograms := seriesCount * histogramsCount + expectedHistograms := seriesCount * histogramsCount * 2 retry(t, defaultRetryInterval, defaultRetries, func() bool { return wt.checkNumSeries() >= expectedSeries }) From 6684344026c9395df1db1f92114009cac50803f5 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Thu, 21 Nov 2024 10:50:18 -0800 Subject: [PATCH 1036/1397] Rename old histogram record type, use old names for new records --- tsdb/agent/db.go | 24 +-- tsdb/agent/db_test.go | 66 +++++++- tsdb/db_test.go | 186 +++++++++++++++++++- tsdb/head_append.go | 19 +-- tsdb/head_test.go | 317 +---------------------------------- tsdb/head_wal.go | 8 +- tsdb/ooo_head_read_test.go | 2 +- tsdb/record/record.go | 112 +++---------- tsdb/record/record_test.go | 36 +--- tsdb/tsdbutil/histogram.go | 1 - tsdb/wlog/checkpoint.go | 42 +---- tsdb/wlog/checkpoint_test.go | 12 +- tsdb/wlog/watcher.go | 4 +- tsdb/wlog/watcher_test.go | 8 +- 14 files changed, 302 insertions(+), 535 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index bcfc7be129..5cf56d5871 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -463,7 +463,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- samples - case record.HistogramSamples, record.CustomBucketHistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: histograms := histogramsPool.Get()[:0] histograms, err = dec.HistogramSamples(rec, histograms) if err != nil { @@ -475,7 +475,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- histograms - case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: floatHistograms := floatHistogramsPool.Get()[:0] floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { @@ -1154,35 +1154,19 @@ func (a *appender) log() 
error { } if len(a.pendingHistograms) > 0 { - var customBucketsExist bool - buf, customBucketsExist = encoder.HistogramSamples(a.pendingHistograms, buf) + buf = encoder.HistogramSamples(a.pendingHistograms, buf) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] - if customBucketsExist { - buf = encoder.CustomBucketHistogramSamples(a.pendingHistograms, buf) - if err := a.wal.Log(buf); err != nil { - return err - } - buf = buf[:0] - } } if len(a.pendingFloatHistograms) > 0 { - var customBucketsExist bool - buf, customBucketsExist = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) + buf = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) if err := a.wal.Log(buf); err != nil { return err } buf = buf[:0] - if customBucketsExist { - buf = encoder.CustomBucketFloatHistogramSamples(a.pendingFloatHistograms, buf) - if err := a.wal.Log(buf); err != nil { - return err - } - buf = buf[:0] - } } if len(a.pendingExamplars) > 0 { diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 6b5d9ece05..8bcb71c86a 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -163,7 +163,7 @@ func TestCommit(t *testing.T) { } } - lbls = labelsForTest(t.Name()+"_custom_bucket_histogram", numSeries) + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -187,7 +187,7 @@ func TestCommit(t *testing.T) { } } - lbls = labelsForTest(t.Name()+"custom_bucket_float_histogram", numSeries) + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) 
@@ -231,13 +231,13 @@ func TestCommit(t *testing.T) { require.NoError(t, err) walSamplesCount += len(samples) - case record.HistogramSamples, record.CustomBucketHistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: var histograms []record.RefHistogramSample histograms, err = dec.HistogramSamples(rec, histograms) require.NoError(t, err) walHistogramCount += len(histograms) - case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: var floatHistograms []record.RefFloatHistogramSample floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) require.NoError(t, err) @@ -294,6 +294,18 @@ func TestRollback(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) + require.NoError(t, err) + } + } + lbls = labelsForTest(t.Name()+"_float_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -306,6 +318,18 @@ func TestRollback(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) + require.NoError(t, err) + } + } + // Do a rollback, which should clear uncommitted data. A followup call to // commit should persist nothing to the WAL. 
require.NoError(t, app.Rollback()) @@ -346,13 +370,13 @@ func TestRollback(t *testing.T) { require.NoError(t, err) walExemplarsCount += len(exemplars) - case record.HistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: var histograms []record.RefHistogramSample histograms, err = dec.HistogramSamples(rec, histograms) require.NoError(t, err) walHistogramCount += len(histograms) - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: var floatHistograms []record.RefFloatHistogramSample floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) require.NoError(t, err) @@ -363,7 +387,7 @@ func TestRollback(t *testing.T) { } // Check that only series get stored after calling Rollback. - require.Equal(t, numSeries*3, walSeriesCount, "series should have been written to WAL") + require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL") require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL") require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL") require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL") @@ -412,6 +436,19 @@ func TestFullTruncateWAL(t *testing.T) { require.NoError(t, app.Commit()) } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + lbls = labelsForTest(t.Name()+"_float_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) 
@@ -425,11 +462,24 @@ func TestFullTruncateWAL(t *testing.T) { require.NoError(t, app.Commit()) } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i]) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + // Truncate WAL with mint to GC all the samples. s.truncate(lastTs + 1) m := gatherFamily(t, reg, "prometheus_agent_deleted_series") - require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") + require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") } func TestPartialTruncateWAL(t *testing.T) { diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 306dc4579e..4bbf4b4656 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -4281,6 +4281,188 @@ func TestOOOWALWrite(t *testing.T) { }, }, }, + "custom buckets histogram": { + appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { + seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestCustomBucketsHistogram(int(mins)), nil) + require.NoError(t, err) + return seriesRef, nil + }, + expectedOOORecords: []interface{}{ + // The MmapRef in this are not hand calculated, and instead taken from the test run. + // What is important here is the order of records, and that MmapRef increases for each record. 
+ []record.RefMmapMarker{ + {Ref: 1}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestCustomBucketsHistogram(40)}, + }, + + []record.RefMmapMarker{ + {Ref: 2}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestCustomBucketsHistogram(42)}, + }, + + []record.RefHistogramSample{ + {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestCustomBucketsHistogram(45)}, + {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestCustomBucketsHistogram(35)}, + }, + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 8}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestCustomBucketsHistogram(36)}, + {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestCustomBucketsHistogram(37)}, + }, + + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 82}, + }, + []record.RefHistogramSample{ // Does not contain the in-order sample here. + {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)}, + }, + + // Single commit but multiple OOO records. 
+ []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 160}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)}, + {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestCustomBucketsHistogram(51)}, + }, + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 239}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestCustomBucketsHistogram(52)}, + {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)}, + }, + }, + expectedInORecords: []interface{}{ + []record.RefSeries{ + {Ref: 1, Labels: s1}, + {Ref: 2, Labels: s2}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(60), H: tsdbutil.GenerateTestCustomBucketsHistogram(60)}, + {Ref: 2, T: minutes(60), H: tsdbutil.GenerateTestCustomBucketsHistogram(60)}, + }, + []record.RefHistogramSample{ + {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestCustomBucketsHistogram(40)}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestCustomBucketsHistogram(42)}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestCustomBucketsHistogram(45)}, + {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestCustomBucketsHistogram(35)}, + {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestCustomBucketsHistogram(36)}, + {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestCustomBucketsHistogram(37)}, + }, + []record.RefHistogramSample{ // Contains both in-order and ooo sample. 
+ {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)}, + {Ref: 2, T: minutes(65), H: tsdbutil.GenerateTestCustomBucketsHistogram(65)}, + }, + []record.RefHistogramSample{ + {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)}, + {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestCustomBucketsHistogram(51)}, + {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestCustomBucketsHistogram(52)}, + {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)}, + }, + }, + }, + "custom buckets float histogram": { + appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { + seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(mins))) + require.NoError(t, err) + return seriesRef, nil + }, + expectedOOORecords: []interface{}{ + // The MmapRef in this are not hand calculated, and instead taken from the test run. + // What is important here is the order of records, and that MmapRef increases for each record. + []record.RefMmapMarker{ + {Ref: 1}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(40)}, + }, + + []record.RefMmapMarker{ + {Ref: 2}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(42)}, + }, + + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(45)}, + {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(35)}, + }, + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. 
+ {Ref: 1, MmapRef: 0x100000000 + 8}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(36)}, + {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(37)}, + }, + + []record.RefMmapMarker{ // 3rd sample, hence m-mapped. + {Ref: 1, MmapRef: 0x100000000 + 134}, + }, + []record.RefFloatHistogramSample{ // Does not contain the in-order sample here. + {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)}, + }, + + // Single commit but multiple OOO records. + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 263}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)}, + {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(51)}, + }, + []record.RefMmapMarker{ + {Ref: 2, MmapRef: 0x100000000 + 393}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(52)}, + {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)}, + }, + }, + expectedInORecords: []interface{}{ + []record.RefSeries{ + {Ref: 1, Labels: s1}, + {Ref: 2, Labels: s2}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(60), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(60)}, + {Ref: 2, T: minutes(60), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(60)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(40)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(42)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(45)}, + {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(35)}, + {Ref: 1, T: minutes(36), FH: 
tsdbutil.GenerateTestCustomBucketsFloatHistogram(36)}, + {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(37)}, + }, + []record.RefFloatHistogramSample{ // Contains both in-order and ooo sample. + {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)}, + {Ref: 2, T: minutes(65), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(65)}, + }, + []record.RefFloatHistogramSample{ + {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)}, + {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(51)}, + {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(52)}, + {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)}, + }, + }, + }, } for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { @@ -4374,11 +4556,11 @@ func testOOOWALWrite(t *testing.T, markers, err := dec.MmapMarkers(rec, nil) require.NoError(t, err) records = append(records, markers) - case record.HistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: histogramSamples, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) records = append(records, histogramSamples) - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: floatHistogramSamples, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) records = append(records, floatHistogramSamples) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 78b256fee3..7dacb9037b 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -942,33 +942,18 @@ func (a *headAppender) log() error { } } if len(a.histograms) > 0 { - rec, customBucketsExist := enc.HistogramSamples(a.histograms, buf) + rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { return fmt.Errorf("log histograms: %w", err) } - - if customBucketsExist { - 
enc.CustomBucketHistogramSamples(a.histograms, buf) - buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log custom bucket histograms: %w", err) - } - } } if len(a.floatHistograms) > 0 { - rec, customBucketsExist := enc.FloatHistogramSamples(a.floatHistograms, buf) + rec = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { return fmt.Errorf("log float histograms: %w", err) } - - if customBucketsExist { - buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log custom bucket float histograms: %w", err) - } - } } // Exemplars should be logged after samples (float/native histogram/etc), // otherwise it might happen that we send the exemplars in a remote write diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 527476e113..c3377fecff 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -187,11 +187,11 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) { samples, err := dec.Samples(rec, nil) require.NoError(t, err) recs = append(recs, samples) - case record.HistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: samples, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) recs = append(recs, samples) - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: samples, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) recs = append(recs, samples) @@ -740,89 +740,6 @@ func TestHead_ReadWAL(t *testing.T) { } } -func TestHead_ReadWAL2(t *testing.T) { - for _, compress := range []wlog.CompressionType{wlog.CompressionNone, wlog.CompressionSnappy, wlog.CompressionZstd} { - t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { - entries := []interface{}{ - []record.RefSeries{ - {Ref: 10, Labels: labels.FromStrings("a", "1")}, - {Ref: 11, Labels: labels.FromStrings("a", "2")}, - {Ref: 100, Labels: labels.FromStrings("a", "3")}, - 
}, - []record.RefHistogramSample{ - {Ref: 0, T: 99, H: tsdbutil.GenerateTestHistogram(1)}, - {Ref: 10, T: 100, H: tsdbutil.GenerateTestCustomBucketsHistogram(2)}, - {Ref: 100, T: 100, H: tsdbutil.GenerateTestHistogram(3)}, - }, - []record.RefSeries{ - {Ref: 50, Labels: labels.FromStrings("a", "4")}, - // This series has two refs pointing to it. - {Ref: 101, Labels: labels.FromStrings("a", "3")}, - }, - []record.RefHistogramSample{ - {Ref: 10, T: 101, H: tsdbutil.GenerateTestHistogram(5)}, - {Ref: 50, T: 101, H: tsdbutil.GenerateTestHistogram(6)}, - {Ref: 101, T: 101, H: tsdbutil.GenerateTestCustomBucketsHistogram(7)}, - }, - []tombstones.Stone{ - {Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}}, - }, - []record.RefExemplar{ - {Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("trace_id", "asdf")}, - }, - } - - head, w := newTestHead(t, 1000, compress, false) - defer func() { - require.NoError(t, head.Close()) - }() - - populateTestWL(t, w, entries) - - require.NoError(t, head.Init(math.MinInt64)) - require.Equal(t, uint64(101), head.lastSeriesID.Load()) - - s10 := head.series.getByID(10) - s11 := head.series.getByID(11) - s50 := head.series.getByID(50) - s100 := head.series.getByID(100) - - testutil.RequireEqual(t, labels.FromStrings("a", "1"), s10.lset) - require.Nil(t, s11) // Series without samples should be garbage collected at head.Init(). 
- testutil.RequireEqual(t, labels.FromStrings("a", "4"), s50.lset) - testutil.RequireEqual(t, labels.FromStrings("a", "3"), s100.lset) - - expandChunk := func(c chunkenc.Iterator) (x []sample) { - for c.Next() == chunkenc.ValHistogram { - t, v := c.AtHistogram(nil) - //t, v := c.At() - x = append(x, sample{t: t, h: v}) - } - require.NoError(t, c.Err()) - return x - } - - c, _, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool) - require.NoError(t, err) - require.Equal(t, []sample{{100, 0, tsdbutil.GenerateTestCustomBucketsHistogram(2), nil}, {101, 0, tsdbutil.GenerateTestCustomBucketsHistogram(5), nil}}, expandChunk(c.chunk.Iterator(nil))) - c, _, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool) - require.NoError(t, err) - require.Equal(t, []sample{{101, 0, tsdbutil.GenerateTestHistogram(6), nil}}, expandChunk(c.chunk.Iterator(nil))) - // The samples before the new series record should be discarded since a duplicate record - // is only possible when old samples were compacted. 
- c, _, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool) - require.NoError(t, err) - require.Equal(t, []sample{{101, 0, tsdbutil.GenerateTestCustomBucketsHistogram(7), nil}}, expandChunk(c.chunk.Iterator(nil))) - - q, err := head.ExemplarQuerier(context.Background()) - require.NoError(t, err) - e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")}) - require.NoError(t, err) - require.True(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("trace_id", "asdf")}.Equals(e[0].Exemplars[0])) - }) - } -} - func TestHead_WALMultiRef(t *testing.T) { head, w := newTestHead(t, 1000, wlog.CompressionNone, false) @@ -4036,194 +3953,6 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { testQuery() } -func TestHistogramInWALAndMmapChunk2(t *testing.T) { - head, _ := newTestHead(t, 3000, wlog.CompressionNone, false) - t.Cleanup(func() { - require.NoError(t, head.Close()) - }) - require.NoError(t, head.Init(0)) - - // Series with only histograms. 
- s1 := labels.FromStrings("a", "b1") - k1 := s1.String() - numHistograms := 300 - exp := map[string][]chunks.Sample{} - ts := int64(0) - var app storage.Appender - for _, custom := range []bool{true, false} { - app = head.Appender(context.Background()) - var hists []*histogram.Histogram - if custom { - hists = tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) - } else { - hists = tsdbutil.GenerateTestHistograms(numHistograms) - } - for _, h := range hists { - if !custom { - h.NegativeSpans = h.PositiveSpans - h.NegativeBuckets = h.PositiveBuckets - } - _, err := app.AppendHistogram(0, s1, ts, h, nil) - require.NoError(t, err) - exp[k1] = append(exp[k1], sample{t: ts, h: h.Copy()}) - ts++ - if ts%5 == 0 { - require.NoError(t, app.Commit()) - app = head.Appender(context.Background()) - } - } - require.NoError(t, app.Commit()) - } - for _, custom := range []bool{true, false} { - app = head.Appender(context.Background()) - var hists []*histogram.FloatHistogram - if custom { - hists = tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) - } else { - hists = tsdbutil.GenerateTestFloatHistograms(numHistograms) - } - for _, h := range hists { - if !custom { - h.NegativeSpans = h.PositiveSpans - h.NegativeBuckets = h.PositiveBuckets - } - _, err := app.AppendHistogram(0, s1, ts, nil, h) - require.NoError(t, err) - exp[k1] = append(exp[k1], sample{t: ts, fh: h.Copy()}) - ts++ - if ts%5 == 0 { - require.NoError(t, app.Commit()) - app = head.Appender(context.Background()) - } - } - require.NoError(t, app.Commit()) - head.mmapHeadChunks() - } - - // There should be 20 mmap chunks in s1. 
- ms := head.series.getByHash(s1.Hash(), s1) - require.Len(t, ms.mmappedChunks, 19) - expMmapChunks := make([]*mmappedChunk, 0, 20) - for _, mmap := range ms.mmappedChunks { - require.Positive(t, mmap.numSamples) - cpy := *mmap - expMmapChunks = append(expMmapChunks, &cpy) - } - expHeadChunkSamples := ms.headChunks.chunk.NumSamples() - require.Positive(t, expHeadChunkSamples) - - // Series with mix of histograms and float. - s2 := labels.FromStrings("a", "b2") - k2 := s2.String() - ts = 0 - for _, custom := range []bool{true, false} { - app = head.Appender(context.Background()) - var hists []*histogram.Histogram - if custom { - hists = tsdbutil.GenerateTestCustomBucketsHistograms(100) - } else { - hists = tsdbutil.GenerateTestHistograms(100) - } - for _, h := range hists { - ts++ - if !custom { - h.NegativeSpans = h.PositiveSpans - h.NegativeBuckets = h.PositiveBuckets - } - _, err := app.AppendHistogram(0, s2, ts, h, nil) - require.NoError(t, err) - eh := h.Copy() - if ts > 30 && (ts-10)%20 == 1 { - // Need "unknown" hint after float sample. - eh.CounterResetHint = histogram.UnknownCounterReset - } - exp[k2] = append(exp[k2], sample{t: ts, h: eh}) - if ts%20 == 0 { - require.NoError(t, app.Commit()) - app = head.Appender(context.Background()) - // Add some float. 
- for i := 0; i < 10; i++ { - ts++ - _, err := app.Append(0, s2, ts, float64(ts)) - require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) - } - require.NoError(t, app.Commit()) - app = head.Appender(context.Background()) - } - } - require.NoError(t, app.Commit()) - } - for _, custom := range []bool{true, false} { - app = head.Appender(context.Background()) - var hists []*histogram.FloatHistogram - if custom { - hists = tsdbutil.GenerateTestCustomBucketsFloatHistograms(100) - } else { - hists = tsdbutil.GenerateTestFloatHistograms(100) - } - for _, h := range hists { - ts++ - if !custom { - h.NegativeSpans = h.PositiveSpans - h.NegativeBuckets = h.PositiveBuckets - } - _, err := app.AppendHistogram(0, s2, ts, nil, h) - require.NoError(t, err) - eh := h.Copy() - if ts > 30 && (ts-10)%20 == 1 { - // Need "unknown" hint after float sample. - eh.CounterResetHint = histogram.UnknownCounterReset - } - exp[k2] = append(exp[k2], sample{t: ts, fh: eh}) - if ts%20 == 0 { - require.NoError(t, app.Commit()) - app = head.Appender(context.Background()) - // Add some float. - for i := 0; i < 10; i++ { - ts++ - _, err := app.Append(0, s2, ts, float64(ts)) - require.NoError(t, err) - exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)}) - } - require.NoError(t, app.Commit()) - app = head.Appender(context.Background()) - } - } - require.NoError(t, app.Commit()) - } - - // Restart head. - require.NoError(t, head.Close()) - startHead := func() { - w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) - require.NoError(t, err) - head, err = NewHead(nil, nil, w, nil, head.opts, nil) - require.NoError(t, err) - require.NoError(t, head.Init(0)) - } - startHead() - - // Checking contents of s1. 
- ms = head.series.getByHash(s1.Hash(), s1) - require.Equal(t, expMmapChunks, ms.mmappedChunks) - require.Equal(t, expHeadChunkSamples, ms.headChunks.chunk.NumSamples()) - - testQuery := func() { - q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime()) - require.NoError(t, err) - act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*")) - compareSeries(t, exp, act) - } - testQuery() - - // Restart with no mmap chunks to test WAL replay. - require.NoError(t, head.Close()) - require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot))) - startHead() - testQuery() -} - func TestChunkSnapshot(t *testing.T) { head, _ := newTestHead(t, 120*4, wlog.CompressionNone, false) defer func() { @@ -5360,48 +5089,6 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { require.Positive(t, offset) } -func TestHistogramWALANDWBLReplay(t *testing.T) { - dir := t.TempDir() - wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) - require.NoError(t, err) - oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, wlog.CompressionSnappy) - require.NoError(t, err) - - opts := DefaultHeadOptions() - opts.ChunkRange = 1000 - opts.ChunkDirRoot = dir - opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds()) - opts.EnableNativeHistograms.Store(true) - opts.EnableOOONativeHistograms.Store(true) - - h, err := NewHead(nil, nil, wal, oooWlog, opts, nil) - require.NoError(t, err) - require.NoError(t, h.Init(0)) - - var expOOOSamples []chunks.Sample - l := labels.FromStrings("foo", "bar") - appendSample := func(mins int64, val float64, isOOO bool, isCustomBucketHistogram bool) { - app := h.Appender(context.Background()) - var s sample - if isCustomBucketHistogram { - s = sample{t: mins * time.Minute.Milliseconds(), h: tsdbutil.GenerateTestCustomBucketsHistogram(int(val))} - } else { - s = sample{t: mins * time.Minute.Milliseconds(), h: 
tsdbutil.GenerateTestHistogram(int(val))} - } - _, err := app.AppendHistogram(0, l, mins*time.Minute.Milliseconds(), s.h, nil) - require.NoError(t, err) - require.NoError(t, app.Commit()) - - if isOOO { - expOOOSamples = append(expOOOSamples, s) - } - } - - // In-order histogram samples. - appendSample(60, 60, false, false) - -} - // TestWBLReplay checks the replay at a low level. func TestWBLReplay(t *testing.T) { for name, scenario := range sampleTypeScenarios { diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 9d1e24b706..458162522b 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -189,7 +189,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- exemplars - case record.HistogramSamples, record.CustomBucketHistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: hists := histogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -201,7 +201,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- hists - case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: hists := floatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { @@ -726,7 +726,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- markers - case record.HistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: hists := histogramSamplesPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -738,7 +738,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- hists - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: hists := floatHistogramSamplesPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, 
hists) if err != nil { diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index bc1cb67d1e..adbd3278ba 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -963,7 +963,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( }, }, { - name: "After Series() prev head gets mmapped after getting samples, new head gets new samples also overlapping, none of these should appear in response.", + name: "After Series() prev head mmapped after getting samples, new head gets new samples also overlapping, none should appear in response.", queryMinT: minutes(0), queryMaxT: minutes(100), firstInOrderSampleAt: minutes(120), diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 2b19cdbb6f..2dd7ffe027 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -48,14 +48,14 @@ const ( MmapMarkers Type = 5 // Metadata is used to match WAL records of type Metadata. Metadata Type = 6 - // HistogramSamples is used to match WAL records of type Histograms. - HistogramSamples Type = 7 - // FloatHistogramSamples is used to match WAL records of type Float Histograms. - FloatHistogramSamples Type = 8 - // CustomBucketHistogramSamples is used to match WAL records of type Histogram with custom buckets. - CustomBucketHistogramSamples Type = 9 - // CustomBucketFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets. - CustomBucketFloatHistogramSamples Type = 10 + // HistogramSamplesLegacy is used to match WAL records of type Histograms prior to intrdocuing support of custom buckets, to maintain backwards compatibility. + HistogramSamplesLegacy Type = 7 + // FloatHistogramSamplesLegacy is used to match WAL records of type Float Histograms proior to introducing support of custom buckets, to maintain backwards compatibility. + FloatHistogramSamplesLegacy Type = 8 + // HistogramSamples is used to match WAL records of type Histogram, and supports custom buckets. 
+ HistogramSamples Type = 9 + // FloatHistogramSamples is used to match WAL records of type Float Histogram, and supports custom buckets. + FloatHistogramSamples Type = 10 ) func (rt Type) String() string { @@ -68,14 +68,14 @@ func (rt Type) String() string { return "tombstones" case Exemplars: return "exemplars" + case HistogramSamplesLegacy: + return "histogram_samples_legacy" + case FloatHistogramSamplesLegacy: + return "float_histogram_samples_legacy" case HistogramSamples: - return "histogram_samples" + return "histogram_sample" case FloatHistogramSamples: return "float_histogram_samples" - case CustomBucketHistogramSamples: - return "custom_bucket_histogram_samples" - case CustomBucketFloatHistogramSamples: - return "custom_bucket_float_histogram_samples" case MmapMarkers: return "mmapmarkers" case Metadata: @@ -215,7 +215,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketHistogramSamples, CustomBucketFloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamplesLegacy, FloatHistogramSamplesLegacy, HistogramSamples, FloatHistogramSamples: return t } return Unknown @@ -436,7 +436,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != HistogramSamples && t != CustomBucketHistogramSamples { + if t != HistogramSamples && t != HistogramSamplesLegacy { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -528,7 +528,7 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := 
encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != FloatHistogramSamples && t != CustomBucketFloatHistogramSamples { + if t != FloatHistogramSamples && t != FloatHistogramSamplesLegacy { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -744,40 +744,10 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, bool) { +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) - if len(histograms) == 0 { - return buf.Get(), false - } - - // Store base timestamp and base reference number of first histogram. - // All histograms encode their timestamp and ref as delta to those. - first := histograms[0] - buf.PutBE64(uint64(first.Ref)) - buf.PutBE64int64(first.T) - - customBucketSamplesExist := false - for _, h := range histograms { - if h.H.UsesCustomBuckets() { - customBucketSamplesExist = true - continue - } - - buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - buf.PutVarint64(h.T - first.T) - - EncodeHistogram(&buf, h.H) - } - - return buf.Get(), customBucketSamplesExist -} - -func (e *Encoder) CustomBucketHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { - buf := encoding.Encbuf{B: b} - buf.PutByte(byte(CustomBucketHistogramSamples)) - if len(histograms) == 0 { return buf.Get() } @@ -789,12 +759,10 @@ func (e *Encoder) CustomBucketHistogramSamples(histograms []RefHistogramSample, buf.PutBE64int64(first.T) for _, h := range histograms { - if h.H.UsesCustomBuckets() { - buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - buf.PutVarint64(h.T - first.T) + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) - EncodeHistogram(&buf, h.H) - } + EncodeHistogram(&buf, h.H) } return buf.Get() @@ -841,40 +809,10 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { } } 
-func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, bool) { +func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) - if len(histograms) == 0 { - return buf.Get(), false - } - - // Store base timestamp and base reference number of first histogram. - // All histograms encode their timestamp and ref as delta to those. - first := histograms[0] - buf.PutBE64(uint64(first.Ref)) - buf.PutBE64int64(first.T) - - customBucketsExist := false - for _, h := range histograms { - if h.FH.UsesCustomBuckets() { - customBucketsExist = true - continue - } - - buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - buf.PutVarint64(h.T - first.T) - - EncodeFloatHistogram(&buf, h.FH) - } - - return buf.Get(), customBucketsExist -} - -func (e *Encoder) CustomBucketFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { - buf := encoding.Encbuf{B: b} - buf.PutByte(byte(CustomBucketFloatHistogramSamples)) - if len(histograms) == 0 { return buf.Get() } @@ -886,12 +824,10 @@ func (e *Encoder) CustomBucketFloatHistogramSamples(histograms []RefFloatHistogr buf.PutBE64int64(first.T) for _, h := range histograms { - if h.FH.UsesCustomBuckets() { - buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) - buf.PutVarint64(h.T - first.T) + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) - EncodeFloatHistogram(&buf, h.FH) - } + EncodeFloatHistogram(&buf, h.FH) } return buf.Get() diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index af94f2b207..901fe2e9f6 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -166,12 +166,9 @@ func TestRecord_EncodeDecode(t *testing.T) { }, } - histSamples, _ := enc.HistogramSamples(histograms, nil) - customBucketHistSamples := enc.CustomBucketHistogramSamples(histograms, nil) + histSamples := enc.HistogramSamples(histograms, 
nil) decHistograms, err := dec.HistogramSamples(histSamples, nil) require.NoError(t, err) - decCustomBucketHistSamples, err := dec.HistogramSamples(customBucketHistSamples, nil) - decHistograms = append(decHistograms, decCustomBucketHistSamples...) require.Equal(t, histograms, decHistograms) floatHistograms := make([]RefFloatHistogramSample, len(histograms)) @@ -182,13 +179,9 @@ func TestRecord_EncodeDecode(t *testing.T) { FH: h.H.ToFloat(nil), } } - floatHistSamples, _ := enc.FloatHistogramSamples(floatHistograms, nil) - customBucketFloatHistSamples := enc.CustomBucketFloatHistogramSamples(floatHistograms, nil) + floatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) decFloatHistograms, err := dec.FloatHistogramSamples(floatHistSamples, nil) require.NoError(t, err) - decCustomBucketFloatHistograms, err := dec.FloatHistogramSamples(customBucketFloatHistSamples, nil) - require.NoError(t, err) - decFloatHistograms = append(decFloatHistograms, decCustomBucketFloatHistograms...) require.Equal(t, floatHistograms, decFloatHistograms) // Gauge integer histograms. @@ -196,13 +189,9 @@ func TestRecord_EncodeDecode(t *testing.T) { histograms[i].H.CounterResetHint = histogram.GaugeType } - gaugeHistSamples, _ := enc.HistogramSamples(histograms, nil) - customBucketGaugeHistSamples := enc.CustomBucketHistogramSamples(histograms, nil) + gaugeHistSamples := enc.HistogramSamples(histograms, nil) decGaugeHistograms, err := dec.HistogramSamples(gaugeHistSamples, nil) require.NoError(t, err) - decCustomBucketGaugeHistograms, err := dec.HistogramSamples(customBucketGaugeHistSamples, nil) - require.NoError(t, err) - decGaugeHistograms = append(decGaugeHistograms, decCustomBucketGaugeHistograms...) require.Equal(t, histograms, decGaugeHistograms) // Gauge float histograms. 
@@ -210,14 +199,10 @@ func TestRecord_EncodeDecode(t *testing.T) { floatHistograms[i].FH.CounterResetHint = histogram.GaugeType } - gaugeFloatHistSamples, _ := enc.FloatHistogramSamples(floatHistograms, nil) - customBucketGaugeFloatHistSamples := enc.CustomBucketFloatHistogramSamples(floatHistograms, nil) + gaugeFloatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeFloatHistSamples, nil) require.NoError(t, err) - decCustomBucketGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketGaugeFloatHistSamples, nil) - require.NoError(t, err) - decFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketGaugeFloatHistograms...) - require.Equal(t, floatHistograms, decFloatHistograms) + require.Equal(t, floatHistograms, decGaugeFloatHistograms) } // TestRecord_Corrupted ensures that corrupted records return the correct error. @@ -318,14 +303,10 @@ func TestRecord_Corrupted(t *testing.T) { }, } - corruptedHists, _ := enc.HistogramSamples(histograms, nil) + corruptedHists := enc.HistogramSamples(histograms, nil) corruptedHists = corruptedHists[:8] - corruptedCustomBucketHists := enc.CustomBucketHistogramSamples(histograms, nil) - corruptedCustomBucketHists = corruptedCustomBucketHists[:8] _, err := dec.HistogramSamples(corruptedHists, nil) require.ErrorIs(t, err, encoding.ErrInvalidSize) - _, err = dec.HistogramSamples(corruptedCustomBucketHists, nil) - require.ErrorIs(t, err, encoding.ErrInvalidSize) }) } @@ -383,12 +364,9 @@ func TestRecord_Type(t *testing.T) { }, }, } - hists, _ := enc.HistogramSamples(histograms, nil) + hists := enc.HistogramSamples(histograms, nil) recordType = dec.Type(hists) require.Equal(t, HistogramSamples, recordType) - customBucketHists := enc.CustomBucketHistogramSamples(histograms, nil) - recordType = dec.Type(customBucketHists) - require.Equal(t, CustomBucketHistogramSamples, recordType) recordType = dec.Type(nil) require.Equal(t, Unknown, recordType) 
diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 4b6cebd579..9230b68376 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -64,7 +64,6 @@ func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) { h.CounterResetHint = histogram.NotCounterReset } r = append(r, h) - } return r } diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 5bb79595b9..ffb96dbe22 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -208,7 +208,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He stats.TotalSamples += len(samples) stats.DroppedSamples += len(samples) - len(repl) - case record.HistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) if err != nil { return nil, fmt.Errorf("decode histogram samples: %w", err) @@ -221,28 +221,11 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf, _ = enc.HistogramSamples(repl, buf) + buf = enc.HistogramSamples(repl, buf) } stats.TotalSamples += len(histogramSamples) stats.DroppedSamples += len(histogramSamples) - len(repl) - case record.CustomBucketHistogramSamples: - histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) - if err != nil { - return nil, fmt.Errorf("decode histogram samples: %w", err) - } - // Drop irrelevant histogramSamples in place. 
- repl := histogramSamples[:0] - for _, h := range histogramSamples { - if h.T >= mint { - repl = append(repl, h) - } - } - if len(repl) > 0 { - buf = enc.CustomBucketHistogramSamples(repl, buf) - } - stats.TotalSamples += len(histogramSamples) - stats.DroppedSamples += len(histogramSamples) - len(repl) - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) if err != nil { return nil, fmt.Errorf("decode float histogram samples: %w", err) @@ -255,24 +238,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf, _ = enc.FloatHistogramSamples(repl, buf) - } - stats.TotalSamples += len(floatHistogramSamples) - stats.DroppedSamples += len(floatHistogramSamples) - len(repl) - case record.CustomBucketFloatHistogramSamples: - floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) - if err != nil { - return nil, fmt.Errorf("decode float histogram samples: %w", err) - } - // Drop irrelevant floatHistogramSamples in place. 
- repl := floatHistogramSamples[:0] - for _, fh := range floatHistogramSamples { - if fh.T >= mint { - repl = append(repl, fh) - } - } - if len(repl) > 0 { - buf = enc.CustomBucketFloatHistogramSamples(repl, buf) + buf = enc.FloatHistogramSamples(repl, buf) } stats.TotalSamples += len(floatHistogramSamples) stats.DroppedSamples += len(floatHistogramSamples) - len(repl) diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index f947f28095..b2c603f134 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -236,7 +236,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) samplesInWAL += 4 h := makeHistogram(i) - b, _ = enc.HistogramSamples([]record.RefHistogramSample{ + b = enc.HistogramSamples([]record.RefHistogramSample{ {Ref: 0, T: last, H: h}, {Ref: 1, T: last + 10000, H: h}, {Ref: 2, T: last + 20000, H: h}, @@ -245,7 +245,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) histogramsInWAL += 4 cbh := makeCustomBucketHistogram(i) - b = enc.CustomBucketHistogramSamples([]record.RefHistogramSample{ + b = enc.HistogramSamples([]record.RefHistogramSample{ {Ref: 0, T: last, H: cbh}, {Ref: 1, T: last + 10000, H: cbh}, {Ref: 2, T: last + 20000, H: cbh}, @@ -254,7 +254,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) histogramsInWAL += 4 fh := makeFloatHistogram(i) - b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ + b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ {Ref: 0, T: last, FH: fh}, {Ref: 1, T: last + 10000, FH: fh}, {Ref: 2, T: last + 20000, FH: fh}, @@ -263,7 +263,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) floatHistogramsInWAL += 4 cbfh := makeCustomBucketFloatHistogram(i) - b = enc.CustomBucketFloatHistogramSamples([]record.RefFloatHistogramSample{ + b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ {Ref: 0, T: last, FH: cbfh}, {Ref: 1, T: last + 10000, FH: cbfh}, {Ref: 2, T: last + 20000, 
FH: cbfh}, @@ -330,14 +330,14 @@ func TestCheckpoint(t *testing.T) { require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp") } samplesInCheckpoint += len(samples) - case record.HistogramSamples, record.CustomBucketHistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: histograms, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) for _, h := range histograms { require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp") } histogramsInCheckpoint += len(histograms) - case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: floatHistograms, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) for _, h := range floatHistograms { diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 169bd296fe..07f881eeaf 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -546,7 +546,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } w.writer.AppendExemplars(exemplars) - case record.HistogramSamples, record.CustomBucketHistogramSamples: + case record.HistogramSamples, record.HistogramSamplesLegacy: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { break @@ -574,7 +574,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { histogramsToSend = histogramsToSend[:0] } - case record.FloatHistogramSamples, record.CustomBucketFloatHistogramSamples: + case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: // Skip if experimental "histograms over remote write" is not enabled. 
if !w.sendHistograms { break diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 5ff70bb215..21490154d9 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -209,7 +209,7 @@ func TestTailSamples(t *testing.T) { NegativeBuckets: []int64{int64(-i) - 1}, } - histograms, _ := enc.HistogramSamples([]record.RefHistogramSample{{ + histograms := enc.HistogramSamples([]record.RefHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, H: hist, @@ -226,21 +226,21 @@ func TestTailSamples(t *testing.T) { CustomValues: []float64{float64(i) + 2}, } - customBucketHistograms := enc.CustomBucketHistogramSamples([]record.RefHistogramSample{{ + customBucketHistograms := enc.HistogramSamples([]record.RefHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, H: customBucketHist, }}, nil) require.NoError(t, w.Log(customBucketHistograms)) - floatHistograms, _ := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ + floatHistograms := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, FH: hist.ToFloat(nil), }}, nil) require.NoError(t, w.Log(floatHistograms)) - customBucketFloatHistograms := enc.CustomBucketFloatHistogramSamples([]record.RefFloatHistogramSample{{ + customBucketFloatHistograms := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, FH: customBucketHist.ToFloat(nil), From f8a39767a43eee03396912a3b6028bd4b9a88284 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Tue, 26 Nov 2024 11:14:09 -0800 Subject: [PATCH 1037/1397] Update WAL doc to include native histogram encodings --- tsdb/docs/format/wal.md | 216 +++++++++++++++++++++++++++++++++++----- tsdb/head_append.go | 1 + tsdb/head_wal.go | 5 +- 3 files changed, 192 insertions(+), 30 deletions(-) diff --git a/tsdb/docs/format/wal.md b/tsdb/docs/format/wal.md index 835ede4113..092999a53a 100644 --- 
a/tsdb/docs/format/wal.md +++ b/tsdb/docs/format/wal.md @@ -79,32 +79,6 @@ The first sample record begins at the second row. └──────────────────────────────────────────────────────────────────┘ ``` -### Native histogram records - -Native histogram records are encoded as - -``` -┌──────────────────────────────────────────────────────────────────┐ -│ type = 2 <1b> │ -├──────────────────────────────────────────────────────────────────┤ -│ ┌────────────────────┬───────────────────────────┐ │ -│ │ id <8b> │ timestamp <8b> │ │ -│ └────────────────────┴───────────────────────────┘ │ -│ ┌────────────────────┬───────────────────────────┬ │ -│ │ id_delta │ timestamp_delta │ │ -│ ├────────────────────┴───────────────────────────┴─────────────┤ │ -│ │ n = len(labels) │ │ -│ ├──────────────────────┬───────────────────────────────────────┤ │ -│ │ len(str_1) │ str_1 │ │ -│ ├──────────────────────┴───────────────────────────────────────┤ │ -│ │ ... │ │ -│ ├───────────────────────┬──────────────────────────────────────┤ │ -│ │ len(str_2n) │ str_2n │ │ │ -│ └───────────────────────┴────────────────┴─────────────────────┘ │ -│ . . . │ -└──────────────────────────────────────────────────────────────────┘ -``` - ### Tombstone records Tombstone records encode tombstones as a list of triples `(series_id, min_time, max_time)` @@ -182,3 +156,193 @@ Metadata records encode the metadata updates associated with a series. └────────────────────────────────────────────┘ ``` +### Native histogram records + +Native histogram records are encoded as a list of histogram samples. +Series reference and timestamp are encoded as deltas w.r.t the first histogram sample. +The first row stores the starting id and the starting timestamp. +The first native histogram sample record begins at the second row. + +There are several different types of native histogram samples. 
+ +Integer histogram encoding: + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ type = 9 <1b> │ +├──────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬───────────────────────────┬ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴────┬──────────────────────┤ │ +│ │ counter_reset_hint <8b> │ schema │ │ +│ ├──────────────────────┬──┴──────────────────────┤ │ +│ │ zero_threshold <8b> │ zero_count │ │ +│ ├──────────────────────┴┬────────────────────────┤ │ +│ │ count │ sum │ │ +│ ├───────────────────────┴────────┬───────────────┴─────────────┐ │ +│ │ len(positive_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(positive_buckets) │ bucket_count │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_buckets) │ bucket_count │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ └────────────────────────────────┴─────────────────────────────┘ │ +│ . . . │ +└──────────────────────────────────────────────────────────────────┘ +``` + +There are also integer histograms that have custom buckets, which will always +have a schem of 053. 
Custom bucket native histograms additionally encode +a field that specifies the custom values: + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ type = 9 <1b> │ +├──────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬───────────────────────────┬ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴────┬──────────────────────┤ │ +│ │ counter_reset_hint <8b> │ schema │ │ +│ ├──────────────────────┬──┴──────────────────────┤ │ +│ │ zero_threshold <8b> │ zero_count │ │ +│ ├──────────────────────┴┬────────────────────────┤ │ +│ │ count │ sum │ │ +│ ├───────────────────────┴────────┬───────────────┴─────────────┐ │ +│ │ len(positive_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(positive_buckets) │ bucket_count │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_buckets) │ bucket_count │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(custom_values) │ value <8b> │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ └────────────────────────────────┴─────────────────────────────┘ │ +│ . . . 
│ +└──────────────────────────────────────────────────────────────────┘ +``` + +(Note: negative spans and negative buckets will be empty for custom bucket native histograms.) + +Float histogram encoding: + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ type = 10 <1b> │ +├──────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴────┬──────────────────────┤ │ +│ │ counter_reset_hint <8b> │ schema │ │ +│ ├──────────────────────┬──┴──────────────────────┤ │ +│ │ zero_threshold <8b> │ zero_count <8b> │ │ +│ ├──────────────────────┴┬────────────────────────┤ │ +│ │ count <8b> │ sum <8b> │ │ +│ ├───────────────────────┴────────┬───────────────┴─────────────┐ │ +│ │ len(positive_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(positive_buckets) │ bucket_count <8b> │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_buckets) │ bucket_count <8b> │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ └────────────────────────────────┴─────────────────────────────┘ │ +│ . . . 
│ +└──────────────────────────────────────────────────────────────────┘ +``` + +There are also float histograms with custom buckets. + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ type = 10 <1b> │ +├──────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴────┬──────────────────────┤ │ +│ │ counter_reset_hint <8b> │ schema │ │ +│ ├──────────────────────┬──┴──────────────────────┤ │ +│ │ zero_threshold <8b> │ zero_count <8b> │ │ +│ ├──────────────────────┴┬────────────────────────┤ │ +│ │ count <8b> │ sum <8b> │ │ +│ ├───────────────────────┴────────┬───────────────┴─────────────┐ │ +│ │ len(positive_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_spans) │ offset │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ length │ │ +│ │ ├─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(positive_buckets) │ bucket_count <8b> │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(negative_buckets) │ bucket_count <8b> │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ len(custom_values) │ value <8b> │ │ +│ ├────────────────────────────────┼─────────────────────────────┤ │ +│ │ │ . . . 
│ │ +│ └────────────────────────────────┴─────────────────────────────┘ │ +│ . . . │ +└──────────────────────────────────────────────────────────────────┘ +``` + +(Note: negative spans and negative buckets will also be empty for custom bucket float native histograms.) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 7dacb9037b..1cac44e160 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -691,6 +691,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { s.lastHistogramValue = &histogram.Histogram{} } + // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. _, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 458162522b..d71dc9d33d 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -58,7 +58,6 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch var unknownExemplarRefs atomic.Uint64 var unknownHistogramRefs atomic.Uint64 var unknownMetadataRefs atomic.Uint64 - // Track number of series records that had overlapping m-map chunks. 
var mmapOverlappingChunks atomic.Uint64 @@ -139,8 +138,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch dec := record.NewDecoder(syms) for r.Next() { rec := r.Record() - recType := dec.Type(rec) - switch recType { + switch dec.Type(rec) { case record.Series: series := seriesPool.Get()[:0] series, err = dec.Series(rec, series) @@ -618,7 +616,6 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp if s.t <= ms.mmMaxTime { continue } - var chunkCreated bool if s.h != nil { _, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts) From 6b44c1437f296569254c166e3c3ff0dc949f0e35 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Wed, 27 Nov 2024 09:24:59 -0800 Subject: [PATCH 1038/1397] Fix comment and histogram record string --- tsdb/record/record.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 2dd7ffe027..0707ed54fe 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -48,9 +48,9 @@ const ( MmapMarkers Type = 5 // Metadata is used to match WAL records of type Metadata. Metadata Type = 6 - // HistogramSamplesLegacy is used to match WAL records of type Histograms prior to intrdocuing support of custom buckets, to maintain backwards compatibility. + // HistogramSamplesLegacy is used to match WAL records of type Histograms prior to introducing support of custom buckets, for backwards compatibility. HistogramSamplesLegacy Type = 7 - // FloatHistogramSamplesLegacy is used to match WAL records of type Float Histograms proior to introducing support of custom buckets, to maintain backwards compatibility. + // FloatHistogramSamplesLegacy is used to match WAL records of type Float Histograms prior to introducing support of custom buckets, for backwards compatibility. FloatHistogramSamplesLegacy Type = 8 // HistogramSamples is used to match WAL records of type Histogram, and supports custom buckets. 
HistogramSamples Type = 9 @@ -73,7 +73,7 @@ func (rt Type) String() string { case FloatHistogramSamplesLegacy: return "float_histogram_samples_legacy" case HistogramSamples: - return "histogram_sample" + return "histogram_samples" case FloatHistogramSamples: return "float_histogram_samples" case MmapMarkers: From 45944c1847d453c41870d6664c352a44d02682e3 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Wed, 27 Nov 2024 09:26:46 -0800 Subject: [PATCH 1039/1397] Extend tsdb agent tests with custom bucket histograms --- tsdb/agent/db_test.go | 132 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 128 insertions(+), 4 deletions(-) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 8bcb71c86a..cc5e0d34cb 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -524,6 +524,19 @@ func TestPartialTruncateWAL(t *testing.T) { require.NoError(t, app.Commit()) } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-1", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints) + + for i := 0; i < numDatapoints; i++ { + _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + lbls = labelsForTest(t.Name()+"_float_histogram_batch-1", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -537,6 +550,19 @@ func TestPartialTruncateWAL(t *testing.T) { require.NoError(t, app.Commit()) } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-1", numSeries) + for _, l := range lbls { + lset := labels.New(l...) 
+ + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints) + + for i := 0; i < numDatapoints; i++ { + _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + // Create second batch of 800 series with 1000 data-points with a fixed lastTs as 600. lastTs = 600 lbls = labelsForTest(t.Name()+"batch-2", numSeries) @@ -563,6 +589,19 @@ func TestPartialTruncateWAL(t *testing.T) { require.NoError(t, app.Commit()) } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-2", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints) + + for i := 0; i < numDatapoints; i++ { + _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + lbls = labelsForTest(t.Name()+"_float_histogram_batch-2", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -576,11 +615,24 @@ func TestPartialTruncateWAL(t *testing.T) { require.NoError(t, app.Commit()) } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-2", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints) + + for i := 0; i < numDatapoints; i++ { + _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + } + // Truncate WAL with mint to GC only the first batch of 800 series and retaining 2nd batch of 800 series. 
s.truncate(lastTs - 1) m := gatherFamily(t, reg, "prometheus_agent_deleted_series") - require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") + require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") } func TestWALReplay(t *testing.T) { @@ -616,6 +668,18 @@ func TestWALReplay(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) + require.NoError(t, err) + } + } + lbls = labelsForTest(t.Name()+"_float_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -628,6 +692,18 @@ func TestWALReplay(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + + for i := 0; i < numHistograms; i++ { + _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) + require.NoError(t, err) + } + } + require.NoError(t, app.Commit()) require.NoError(t, s.Close()) @@ -646,7 +722,7 @@ func TestWALReplay(t *testing.T) { // Check if all the series are retrieved back from the WAL. m := gatherFamily(t, reg, "prometheus_agent_active_series") - require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count") + require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count") // Check if lastTs of the samples retrieved from the WAL is retained. 
metrics := replayStorage.series.series @@ -878,6 +954,18 @@ func TestDBAllowOOOSamples(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + + for i := offset; i < numDatapoints+offset; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), histograms[i-offset], nil) + require.NoError(t, err) + } + } + lbls = labelsForTest(t.Name()+"_float_histogram", numSeries) for _, l := range lbls { lset := labels.New(l...) @@ -890,10 +978,22 @@ func TestDBAllowOOOSamples(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries) + for _, l := range lbls { + lset := labels.New(l...) + + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + + for i := offset; i < numDatapoints+offset; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i-offset]) + require.NoError(t, err) + } + } + require.NoError(t, app.Commit()) m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total") require.Equal(t, float64(20), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples") - require.Equal(t, float64(40), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms") + require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms") require.NoError(t, s.Close()) // Hack: s.wal.Dir() is the /wal subdirectory of the original storage path. @@ -942,6 +1042,18 @@ func TestDBAllowOOOSamples(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries*2) + for _, l := range lbls { + lset := labels.New(l...) 
+ + histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) + + for i := 0; i < numDatapoints; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) + require.NoError(t, err) + } + } + lbls = labelsForTest(t.Name()+"_float_histogram", numSeries*2) for _, l := range lbls { lset := labels.New(l...) @@ -954,10 +1066,22 @@ func TestDBAllowOOOSamples(t *testing.T) { } } + lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries*2) + for _, l := range lbls { + lset := labels.New(l...) + + floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) + + for i := 0; i < numDatapoints; i++ { + _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) + require.NoError(t, err) + } + } + require.NoError(t, app.Commit()) m = gatherFamily(t, reg2, "prometheus_agent_samples_appended_total") require.Equal(t, float64(40), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples") - require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms") + require.Equal(t, float64(160), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms") require.NoError(t, db.Close()) } From 7aa1f4571b09936e67997d25a610c2974dace810 Mon Sep 17 00:00:00 2001 From: machine424 Date: Thu, 5 Dec 2024 13:28:15 +0100 Subject: [PATCH 1040/1397] fix(remote): rename some remote read related metrics for better clarity. 
From the remote read handler side: prometheus_api_remote_read_queries -> prometheus_remote_read_handler_queries From the remote read client side: prometheus_remote_storage_read_queries_total -> prometheus_remote_read_client_queries_total prometheus_remote_storage_remote_read_queries -> prometheus_remote_read_client_queries prometheus_remote_storage_read_request_duration_seconds -> prometheus_remote_read_client_request_duration_seconds Signed-off-by: machine424 --- storage/remote/client.go | 12 ++++++------ storage/remote/read_handler.go | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/storage/remote/client.go b/storage/remote/client.go index ad766be9bf..2538ee90a0 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -81,8 +81,8 @@ var ( remoteReadQueriesTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, - Subsystem: subsystem, - Name: "read_queries_total", + Subsystem: "remote_read_client", + Name: "queries_total", Help: "The total number of remote read queries.", }, []string{remoteName, endpoint, "response_type", "code"}, @@ -90,8 +90,8 @@ var ( remoteReadQueries = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, - Subsystem: subsystem, - Name: "remote_read_queries", + Subsystem: "remote_read_client", + Name: "queries", Help: "The number of in-flight remote read queries.", }, []string{remoteName, endpoint}, @@ -99,8 +99,8 @@ var ( remoteReadQueryDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: namespace, - Subsystem: subsystem, - Name: "read_request_duration_seconds", + Subsystem: "remote_read_client", + Name: "request_duration_seconds", Help: "Histogram of the latency for remote read requests. 
Note that for streamed responses this is only the duration of the initial call and does not include the processing of the stream.", Buckets: append(prometheus.DefBuckets, 25, 60), NativeHistogramBucketFactor: 1.1, diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index 8f2945f974..3e315a6157 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -56,10 +56,10 @@ func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable stor marshalPool: &sync.Pool{}, queries: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "prometheus", - Subsystem: "api", // TODO: changes to storage in Prometheus 3.0. - Name: "remote_read_queries", - Help: "The current number of remote read queries being executed or waiting.", + Namespace: namespace, + Subsystem: "remote_read_handler", + Name: "queries", + Help: "The current number of remote read queries that are either in execution or queued on the handler.", }), } if r != nil { From 713903fe48ff0ae5fbd45ffc958af86492266c2a Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Fri, 6 Dec 2024 17:58:21 +0000 Subject: [PATCH 1041/1397] fix: fix configuration and remove unneeded libs Signed-off-by: Paulo Dias --- discovery/openstack/loadbalancer.go | 2 -- docs/configuration/configuration.md | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go index 5b72a7d538..7bdf1d2fdf 100644 --- a/discovery/openstack/loadbalancer.go +++ b/discovery/openstack/loadbalancer.go @@ -16,7 +16,6 @@ package openstack import ( "context" "fmt" - "log" "log/slog" "net" "strconv" @@ -120,7 +119,6 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro // Fetch all floating IPs with pagination fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages() if err != nil { - log.Printf("Error calling OpenStack API: %v", err) return nil, fmt.Errorf("failed to list 
all fips: %w", err) } if err != nil { diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index da3af79600..3749681977 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1197,9 +1197,9 @@ The following meta labels are available on targets during [relabeling](#relabel_ #### `loadbalancer` -The `loadbalancer` role discovers one target per `PROMETHEUS` listener of -Octavia loa dbalancer. The target address defaults to the VIP address of the -load balancer. +The `loadbalancer` role discovers one target per Octavia loadbalancer with a +`PROMETHEUS` listener. The target address defaults to the VIP address +of the load balancer. The following meta labels are available on targets during [relabeling](#relabel_config): From a046417bc0fd9f741e42d785392a19d8b86547c7 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Fri, 6 Dec 2024 13:46:20 -0800 Subject: [PATCH 1042/1397] Use new record type only for NHCB --- tsdb/agent/db.go | 40 +++++++--- tsdb/agent/db_test.go | 11 ++- tsdb/db_test.go | 148 ++++++++++++++++++++++------------- tsdb/head_append.go | 63 ++++++++++++--- tsdb/head_test.go | 4 +- tsdb/head_wal.go | 8 +- tsdb/record/record.go | 103 ++++++++++++++++++++---- tsdb/record/record_test.go | 34 ++++++-- tsdb/testutil.go | 18 ++--- tsdb/wlog/checkpoint.go | 39 ++++++++- tsdb/wlog/checkpoint_test.go | 12 +-- tsdb/wlog/watcher.go | 4 +- tsdb/wlog/watcher_test.go | 8 +- 13 files changed, 357 insertions(+), 135 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 5cf56d5871..0bcef8e7bc 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -463,7 +463,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- samples - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: histograms := histogramsPool.Get()[:0] histograms, err = dec.HistogramSamples(rec, 
histograms) if err != nil { @@ -475,7 +475,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- histograms - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: floatHistograms := floatHistogramsPool.Get()[:0] floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { @@ -1154,19 +1154,39 @@ func (a *appender) log() error { } if len(a.pendingHistograms) > 0 { - buf = encoder.HistogramSamples(a.pendingHistograms, buf) - if err := a.wal.Log(buf); err != nil { - return err + var customBucketsHistograms []record.RefHistogramSample + buf, customBucketsHistograms = encoder.HistogramSamples(a.pendingHistograms, buf) + if len(buf) > 0 { + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + if len(customBucketsHistograms) > 0 { + buf = encoder.CustomBucketsHistogramSamples(customBucketsHistograms, nil) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] } - buf = buf[:0] } if len(a.pendingFloatHistograms) > 0 { - buf = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) - if err := a.wal.Log(buf); err != nil { - return err + var customBucketsFloatHistograms []record.RefFloatHistogramSample + buf, customBucketsFloatHistograms = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf) + if len(buf) > 0 { + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + if len(customBucketsFloatHistograms) > 0 { + buf = encoder.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, nil) + if err := a.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] } - buf = buf[:0] } if len(a.pendingExamplars) > 0 { diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index cc5e0d34cb..c81c63f739 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -217,8 +217,7 @@ func TestCommit(t *testing.T) { ) for 
r.Next() { rec := r.Record() - recType := dec.Type(rec) - switch recType { + switch dec.Type(rec) { case record.Series: var series []record.RefSeries series, err = dec.Series(rec, series) @@ -231,13 +230,13 @@ func TestCommit(t *testing.T) { require.NoError(t, err) walSamplesCount += len(samples) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: var histograms []record.RefHistogramSample histograms, err = dec.HistogramSamples(rec, histograms) require.NoError(t, err) walHistogramCount += len(histograms) - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: var floatHistograms []record.RefFloatHistogramSample floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) require.NoError(t, err) @@ -370,13 +369,13 @@ func TestRollback(t *testing.T) { require.NoError(t, err) walExemplarsCount += len(exemplars) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: var histograms []record.RefHistogramSample histograms, err = dec.HistogramSamples(rec, histograms) require.NoError(t, err) walHistogramCount += len(histograms) - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: var floatHistograms []record.RefFloatHistogramSample floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) require.NoError(t, err) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 4bbf4b4656..5024a0cfbb 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -4556,11 +4556,11 @@ func testOOOWALWrite(t *testing.T, markers, err := dec.MmapMarkers(rec, nil) require.NoError(t, err) records = append(records, markers) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, 
record.CustomBucketsHistogramSamples: histogramSamples, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) records = append(records, histogramSamples) - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: floatHistogramSamples, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) records = append(records, floatHistogramSamples) @@ -6461,6 +6461,32 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) return err } + case customBucketsIntHistogram: + appendFunc = func(app storage.Appender, ts, v int64) error { + h := &histogram.Histogram{ + Schema: -53, + Count: uint64(v), + Sum: float64(v), + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{v}, + CustomValues: []float64{float64(v)}, + } + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) + return err + } + case customBucketsFloatHistogram: + appendFunc = func(app storage.Appender, ts, v int64) error { + fh := &histogram.FloatHistogram{ + Schema: -53, + Count: float64(v), + Sum: float64(v), + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []float64{float64(v)}, + CustomValues: []float64{float64(v)}, + } + _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) + return err + } case gaugeIntHistogram, gaugeFloatHistogram: return } @@ -6491,29 +6517,29 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario // The expected counter reset hint for each chunk. expectedChunks []expectedChunk }{ - "counter reset in-order cleared by in-memory OOO chunk": { - samples: []tsValue{ - {1, 40}, // New in In-order. I1. - {4, 30}, // In-order counter reset. I2. - {2, 40}, // New in OOO. O1. - {3, 10}, // OOO counter reset. O2. 
- }, - oooCap: 30, - // Expect all to be set to UnknownCounterReset because we switch between - // in-order and out-of-order samples. - expectedSamples: []expectedTsValue{ - {1, 40, histogram.UnknownCounterReset}, // I1. - {2, 40, histogram.UnknownCounterReset}, // O1. - {3, 10, histogram.UnknownCounterReset}, // O2. - {4, 30, histogram.UnknownCounterReset}, // I2. Counter reset cleared by iterator change. - }, - expectedChunks: []expectedChunk{ - {histogram.UnknownCounterReset, 1}, // I1. - {histogram.UnknownCounterReset, 1}, // O1. - {histogram.UnknownCounterReset, 1}, // O2. - {histogram.UnknownCounterReset, 1}, // I2. - }, - }, + //"counter reset in-order cleared by in-memory OOO chunk": { + // samples: []tsValue{ + // {1, 40}, // New in In-order. I1. + // {4, 30}, // In-order counter reset. I2. + // {2, 40}, // New in OOO. O1. + // {3, 10}, // OOO counter reset. O2. + // }, + // oooCap: 30, + // // Expect all to be set to UnknownCounterReset because we switch between + // // in-order and out-of-order samples. + // expectedSamples: []expectedTsValue{ + // {1, 40, histogram.UnknownCounterReset}, // I1. + // {2, 40, histogram.UnknownCounterReset}, // O1. + // {3, 10, histogram.UnknownCounterReset}, // O2. + // {4, 30, histogram.UnknownCounterReset}, // I2. Counter reset cleared by iterator change. + // }, + // expectedChunks: []expectedChunk{ + // {histogram.UnknownCounterReset, 1}, // I1. + // {histogram.UnknownCounterReset, 1}, // O1. + // {histogram.UnknownCounterReset, 1}, // O2. + // {histogram.UnknownCounterReset, 1}, // I2. + // }, + //}, "counter reset in OOO mmapped chunk cleared by in-memory ooo chunk": { samples: []tsValue{ {8, 30}, // In-order, new chunk. I1. @@ -6544,36 +6570,36 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario {histogram.UnknownCounterReset, 1}, // I1. }, }, - "counter reset in OOO mmapped chunk cleared by another OOO mmapped chunk": { - samples: []tsValue{ - {8, 100}, // In-order, new chunk. I1. 
- {1, 50}, // OOO, new chunk (will be mmapped). MO1. - {5, 40}, // OOO, reset (will be mmapped). MO2. - {6, 50}, // OOO, no reset (will be mmapped). MO2. - {2, 10}, // OOO, new chunk no reset (will be mmapped). MO3. - {3, 20}, // OOO, no reset (will be mmapped). MO3. - {4, 30}, // OOO, no reset (will be mmapped). MO3. - {7, 60}, // OOO, no reset in memory. O1. - }, - oooCap: 3, - expectedSamples: []expectedTsValue{ - {1, 50, histogram.UnknownCounterReset}, // MO1. - {2, 10, histogram.UnknownCounterReset}, // MO3. - {3, 20, histogram.NotCounterReset}, // MO3. - {4, 30, histogram.NotCounterReset}, // MO3. - {5, 40, histogram.UnknownCounterReset}, // MO2. - {6, 50, histogram.NotCounterReset}, // MO2. - {7, 60, histogram.UnknownCounterReset}, // O1. - {8, 100, histogram.UnknownCounterReset}, // I1. - }, - expectedChunks: []expectedChunk{ - {histogram.UnknownCounterReset, 1}, // MO1. - {histogram.UnknownCounterReset, 3}, // MO3. - {histogram.UnknownCounterReset, 2}, // MO2. - {histogram.UnknownCounterReset, 1}, // O1. - {histogram.UnknownCounterReset, 1}, // I1. - }, - }, + //"counter reset in OOO mmapped chunk cleared by another OOO mmapped chunk": { + // samples: []tsValue{ + // {8, 100}, // In-order, new chunk. I1. + // {1, 50}, // OOO, new chunk (will be mmapped). MO1. + // {5, 40}, // OOO, reset (will be mmapped). MO2. + // {6, 50}, // OOO, no reset (will be mmapped). MO2. + // {2, 10}, // OOO, new chunk no reset (will be mmapped). MO3. + // {3, 20}, // OOO, no reset (will be mmapped). MO3. + // {4, 30}, // OOO, no reset (will be mmapped). MO3. + // {7, 60}, // OOO, no reset in memory. O1. + // }, + // oooCap: 3, + // expectedSamples: []expectedTsValue{ + // {1, 50, histogram.UnknownCounterReset}, // MO1. + // {2, 10, histogram.UnknownCounterReset}, // MO3. + // {3, 20, histogram.NotCounterReset}, // MO3. + // {4, 30, histogram.NotCounterReset}, // MO3. + // {5, 40, histogram.UnknownCounterReset}, // MO2. + // {6, 50, histogram.NotCounterReset}, // MO2. 
+ // {7, 60, histogram.UnknownCounterReset}, // O1. + // {8, 100, histogram.UnknownCounterReset}, // I1. + // }, + // expectedChunks: []expectedChunk{ + // {histogram.UnknownCounterReset, 1}, // MO1. + // {histogram.UnknownCounterReset, 3}, // MO3. + // {histogram.UnknownCounterReset, 2}, // MO2. + // {histogram.UnknownCounterReset, 1}, // O1. + // {histogram.UnknownCounterReset, 1}, // I1. + // }, + //}, } for tcName, tc := range cases { @@ -6617,6 +6643,12 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario case floatHistogram: require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i) require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i) + case customBucketsIntHistogram: + require.Equal(t, tc.expectedSamples[i].hint, s.H().CounterResetHint, "sample %d", i) + require.Equal(t, tc.expectedSamples[i].v, int64(s.H().Count), "sample %d", i) + case customBucketsFloatHistogram: + require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i) + require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i) default: t.Fatalf("unexpected sample type %s", name) } @@ -6648,6 +6680,12 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario case floatHistogram: require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx) require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx) + case customBucketsIntHistogram: + require.Equal(t, expectHint, s.H().CounterResetHint, "sample %d", idx) + require.Equal(t, tc.expectedSamples[idx].v, int64(s.H().Count), "sample %d", idx) + case customBucketsFloatHistogram: + require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx) + require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx) default: t.Fatalf("unexpected sample type %s", name) } diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 1cac44e160..c94c42bc53 
100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -943,17 +943,37 @@ func (a *headAppender) log() error { } } if len(a.histograms) > 0 { - rec = enc.HistogramSamples(a.histograms, buf) + var customBucketsHistograms []record.RefHistogramSample + rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log histograms: %w", err) + if len(rec) > 0 { + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log histograms: %w", err) + } + } + + if len(customBucketsHistograms) > 0 { + rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf) + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom buckets histograms: %w", err) + } } } if len(a.floatHistograms) > 0 { - rec = enc.FloatHistogramSamples(a.floatHistograms, buf) + var customBucketsFloatHistograms []record.RefFloatHistogramSample + rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log float histograms: %w", err) + if len(rec) > 0 { + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log float histograms: %w", err) + } + } + + if len(customBucketsFloatHistograms) > 0 { + rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf) + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom buckets float histograms: %w", err) + } } } // Exemplars should be logged after samples (float/native histogram/etc), @@ -1070,12 +1090,24 @@ func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { acc.oooRecords = append(acc.oooRecords, r) } if len(acc.wblHistograms) > 0 { - r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) - acc.oooRecords = append(acc.oooRecords, r) + r, customBucketsHistograms := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + if 
len(r) > 0 { + acc.oooRecords = append(acc.oooRecords, r) + } + if len(customBucketsHistograms) > 0 { + r := acc.enc.CustomBucketsHistogramSamples(customBucketsHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } } if len(acc.wblFloatHistograms) > 0 { - r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) - acc.oooRecords = append(acc.oooRecords, r) + r, customBucketsFloatHistograms := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) + if len(r) > 0 { + acc.oooRecords = append(acc.oooRecords, r) + } + if len(customBucketsFloatHistograms) > 0 { + r := acc.enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } } acc.wblSamples = nil @@ -1459,6 +1491,17 @@ func (a *headAppender) Commit() (err error) { a.commitFloatHistograms(acc) a.commitMetadata() + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) + a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) + acc.collectOOORecords(a) if a.head.wbl != 
nil { if err := a.head.wbl.Log(acc.oooRecords...); err != nil { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index c3377fecff..b77da3e0a4 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -187,11 +187,11 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) { samples, err := dec.Samples(rec, nil) require.NoError(t, err) recs = append(recs, samples) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: samples, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) recs = append(recs, samples) - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: samples, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) recs = append(recs, samples) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index d71dc9d33d..e9557c59f6 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -187,7 +187,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- exemplars - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: hists := histogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -199,7 +199,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- hists - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: hists := floatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { @@ -723,7 +723,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- markers - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, 
record.CustomBucketsHistogramSamples: hists := histogramSamplesPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -735,7 +735,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- hists - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: hists := floatHistogramSamplesPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 0707ed54fe..ccfbbfcef9 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -48,14 +48,14 @@ const ( MmapMarkers Type = 5 // Metadata is used to match WAL records of type Metadata. Metadata Type = 6 - // HistogramSamplesLegacy is used to match WAL records of type Histograms prior to introducing support of custom buckets, for backwards compatibility. - HistogramSamplesLegacy Type = 7 - // FloatHistogramSamplesLegacy is used to match WAL records of type Float Histograms prior to introducing support of custom buckets, for backwards compatibility. - FloatHistogramSamplesLegacy Type = 8 - // HistogramSamples is used to match WAL records of type Histogram, and supports custom buckets. - HistogramSamples Type = 9 - // FloatHistogramSamples is used to match WAL records of type Float Histogram, and supports custom buckets. - FloatHistogramSamples Type = 10 + // HistogramSamples is used to match WAL records of type Histograms. + HistogramSamples Type = 7 + // FloatHistogramSamples is used to match WAL records of type Float Histograms. + FloatHistogramSamples Type = 8 + // CustomBucketsHistogramSamples is used to match WAL records of type Histogram with custom buckets. + CustomBucketsHistogramSamples Type = 9 + // CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets. 
+ CustomBucketsFloatHistogramSamples Type = 10 ) func (rt Type) String() string { @@ -68,14 +68,14 @@ func (rt Type) String() string { return "tombstones" case Exemplars: return "exemplars" - case HistogramSamplesLegacy: - return "histogram_samples_legacy" - case FloatHistogramSamplesLegacy: - return "float_histogram_samples_legacy" case HistogramSamples: return "histogram_samples" case FloatHistogramSamples: return "float_histogram_samples" + case CustomBucketsHistogramSamples: + return "custom_buckets_histogram_samples" + case CustomBucketsFloatHistogramSamples: + return "custom_buckets_float_histogram_samples" case MmapMarkers: return "mmapmarkers" case Metadata: @@ -215,7 +215,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamplesLegacy, FloatHistogramSamplesLegacy, HistogramSamples, FloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples: return t } return Unknown @@ -436,7 +436,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != HistogramSamples && t != HistogramSamplesLegacy { + if t != HistogramSamples && t != CustomBucketsHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -528,7 +528,7 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != FloatHistogramSamples && t != FloatHistogramSamplesLegacy { + if t != FloatHistogramSamples && t != 
CustomBucketsFloatHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -744,10 +744,44 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte { +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) + if len(histograms) == 0 { + return buf.Get(), nil + } + var customBucketHistograms []RefHistogramSample + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. + first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.H.UsesCustomBuckets() { + customBucketHistograms = append(customBucketHistograms, h) + continue + } + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeHistogram(&buf, h.H) + } + + // Reset buffer if only custom bucket histograms existed in list of histogram samples + if len(histograms) == len(customBucketHistograms) { + buf.Reset() + } + + return buf.Get(), customBucketHistograms +} + +func (e *Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketsHistogramSamples)) + if len(histograms) == 0 { return buf.Get() } @@ -809,10 +843,45 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { } } -func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { +func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) + if len(histograms) == 0 { + return buf.Get(), nil + } + + var 
customBucketsFloatHistograms []RefFloatHistogramSample + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. + first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.FH.UsesCustomBuckets() { + customBucketsFloatHistograms = append(customBucketsFloatHistograms, h) + continue + } + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeFloatHistogram(&buf, h.FH) + } + + // Reset buffer if only custom bucket histograms existed in list of histogram samples + if len(histograms) == len(customBucketsFloatHistograms) { + buf.Reset() + } + + return buf.Get(), customBucketsFloatHistograms +} + +func (e *Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketsFloatHistogramSamples)) + if len(histograms) == 0 { return buf.Get() } diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 901fe2e9f6..030b7e2bc7 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -166,9 +166,13 @@ func TestRecord_EncodeDecode(t *testing.T) { }, } - histSamples := enc.HistogramSamples(histograms, nil) + histSamples, customBucketsHistograms := enc.HistogramSamples(histograms, nil) + customBucketsHistSamples := enc.CustomBucketsHistogramSamples(customBucketsHistograms, nil) decHistograms, err := dec.HistogramSamples(histSamples, nil) require.NoError(t, err) + decCustomBucketsHistograms, err := dec.HistogramSamples(customBucketsHistSamples, nil) + require.NoError(t, err) + decHistograms = append(decHistograms, decCustomBucketsHistograms...) 
require.Equal(t, histograms, decHistograms) floatHistograms := make([]RefFloatHistogramSample, len(histograms)) @@ -179,9 +183,13 @@ func TestRecord_EncodeDecode(t *testing.T) { FH: h.H.ToFloat(nil), } } - floatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) + floatHistSamples, customBucketsFloatHistograms := enc.FloatHistogramSamples(floatHistograms, nil) + customBucketsFloatHistSamples := enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, nil) decFloatHistograms, err := dec.FloatHistogramSamples(floatHistSamples, nil) require.NoError(t, err) + decCustomBucketsFloatHistograms, err := dec.FloatHistogramSamples(customBucketsFloatHistSamples, nil) + require.NoError(t, err) + decFloatHistograms = append(decFloatHistograms, decCustomBucketsFloatHistograms...) require.Equal(t, floatHistograms, decFloatHistograms) // Gauge integer histograms. @@ -189,9 +197,13 @@ func TestRecord_EncodeDecode(t *testing.T) { histograms[i].H.CounterResetHint = histogram.GaugeType } - gaugeHistSamples := enc.HistogramSamples(histograms, nil) + gaugeHistSamples, customBucketsGaugeHistograms := enc.HistogramSamples(histograms, nil) + customBucketsGaugeHistSamples := enc.CustomBucketsHistogramSamples(customBucketsGaugeHistograms, nil) decGaugeHistograms, err := dec.HistogramSamples(gaugeHistSamples, nil) require.NoError(t, err) + decCustomBucketsGaugeHistograms, err := dec.HistogramSamples(customBucketsGaugeHistSamples, nil) + require.NoError(t, err) + decGaugeHistograms = append(decGaugeHistograms, decCustomBucketsGaugeHistograms...) require.Equal(t, histograms, decGaugeHistograms) // Gauge float histograms. 
@@ -199,9 +211,12 @@ func TestRecord_EncodeDecode(t *testing.T) { floatHistograms[i].FH.CounterResetHint = histogram.GaugeType } - gaugeFloatHistSamples := enc.FloatHistogramSamples(floatHistograms, nil) + gaugeFloatHistSamples, customBucketsGaugeFloatHistograms := enc.FloatHistogramSamples(floatHistograms, nil) + customBucketsGaugeFloatHistSamples := enc.CustomBucketsFloatHistogramSamples(customBucketsGaugeFloatHistograms, nil) decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeFloatHistSamples, nil) require.NoError(t, err) + decCustomBucketsGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketsGaugeFloatHistSamples, nil) + decGaugeFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketsGaugeFloatHistograms...) require.Equal(t, floatHistograms, decGaugeFloatHistograms) } @@ -303,10 +318,14 @@ func TestRecord_Corrupted(t *testing.T) { }, } - corruptedHists := enc.HistogramSamples(histograms, nil) + corruptedHists, customBucketsHists := enc.HistogramSamples(histograms, nil) corruptedHists = corruptedHists[:8] + corruptedCustomBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHists, nil) + corruptedCustomBucketsHists = corruptedCustomBucketsHists[:8] _, err := dec.HistogramSamples(corruptedHists, nil) require.ErrorIs(t, err, encoding.ErrInvalidSize) + _, err = dec.HistogramSamples(corruptedCustomBucketsHists, nil) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) } @@ -364,9 +383,12 @@ func TestRecord_Type(t *testing.T) { }, }, } - hists := enc.HistogramSamples(histograms, nil) + hists, customBucketsHistograms := enc.HistogramSamples(histograms, nil) recordType = dec.Type(hists) require.Equal(t, HistogramSamples, recordType) + customBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHistograms, nil) + recordType = dec.Type(customBucketsHists) + require.Equal(t, CustomBucketsHistogramSamples, recordType) recordType = dec.Type(nil) require.Equal(t, Unknown, recordType) diff --git a/tsdb/testutil.go 
b/tsdb/testutil.go index a13d89186e..ccfee182c6 100644 --- a/tsdb/testutil.go +++ b/tsdb/testutil.go @@ -29,13 +29,13 @@ import ( ) const ( - float = "float" - intHistogram = "integer histogram" - floatHistogram = "float histogram" - customBucketIntHistogram = "custom bucket int histogram" - customBucketFloatHistogram = "custom bucket float histogram" - gaugeIntHistogram = "gauge int histogram" - gaugeFloatHistogram = "gauge float histogram" + float = "float" + intHistogram = "integer histogram" + floatHistogram = "float histogram" + customBucketsIntHistogram = "custom buckets int histogram" + customBucketsFloatHistogram = "custom buckets float histogram" + gaugeIntHistogram = "gauge int histogram" + gaugeFloatHistogram = "gauge float histogram" ) type testValue struct { @@ -84,7 +84,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{ return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} }, }, - customBucketIntHistogram: { + customBucketsIntHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(int(value))} @@ -95,7 +95,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{ return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(int(value))} }, }, - customBucketFloatHistogram: { + customBucketsFloatHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(value))} diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index ffb96dbe22..63a7737b3a 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -208,7 +208,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He stats.TotalSamples += len(samples) 
stats.DroppedSamples += len(samples) - len(repl) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples: histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) if err != nil { return nil, fmt.Errorf("decode histogram samples: %w", err) @@ -221,11 +221,25 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.HistogramSamples(repl, buf) + buf, _ = enc.HistogramSamples(repl, buf) } stats.TotalSamples += len(histogramSamples) stats.DroppedSamples += len(histogramSamples) - len(repl) - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.CustomBucketsHistogramSamples: + histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) + // Drop irrelevant histogramSamples in place. + repl := histogramSamples[:0] + for _, h := range histogramSamples { + if h.T >= mint { + repl = append(repl, h) + } + } + if len(repl) > 0 { + buf = enc.CustomBucketsHistogramSamples(repl, buf) + } + stats.TotalSamples += len(histogramSamples) + stats.DroppedSamples += len(histogramSamples) - len(repl) + case record.FloatHistogramSamples: floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) if err != nil { return nil, fmt.Errorf("decode float histogram samples: %w", err) @@ -238,7 +252,24 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.FloatHistogramSamples(repl, buf) + buf, _ = enc.FloatHistogramSamples(repl, buf) + } + stats.TotalSamples += len(floatHistogramSamples) + stats.DroppedSamples += len(floatHistogramSamples) - len(repl) + case record.CustomBucketsFloatHistogramSamples: + floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) + if err != nil { + return nil, fmt.Errorf("decode float histogram samples: %w", err) + } + // Drop irrelevant floatHistogramSamples in place. 
+ repl := floatHistogramSamples[:0] + for _, fh := range floatHistogramSamples { + if fh.T >= mint { + repl = append(repl, fh) + } + } + if len(repl) > 0 { + buf = enc.CustomBucketsFloatHistogramSamples(repl, buf) } stats.TotalSamples += len(floatHistogramSamples) stats.DroppedSamples += len(floatHistogramSamples) - len(repl) diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index b2c603f134..873513c4ec 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -236,7 +236,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) samplesInWAL += 4 h := makeHistogram(i) - b = enc.HistogramSamples([]record.RefHistogramSample{ + b, _ = enc.HistogramSamples([]record.RefHistogramSample{ {Ref: 0, T: last, H: h}, {Ref: 1, T: last + 10000, H: h}, {Ref: 2, T: last + 20000, H: h}, @@ -245,7 +245,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) histogramsInWAL += 4 cbh := makeCustomBucketHistogram(i) - b = enc.HistogramSamples([]record.RefHistogramSample{ + b = enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{ {Ref: 0, T: last, H: cbh}, {Ref: 1, T: last + 10000, H: cbh}, {Ref: 2, T: last + 20000, H: cbh}, @@ -254,7 +254,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) histogramsInWAL += 4 fh := makeFloatHistogram(i) - b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ + b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ {Ref: 0, T: last, FH: fh}, {Ref: 1, T: last + 10000, FH: fh}, {Ref: 2, T: last + 20000, FH: fh}, @@ -263,7 +263,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Log(b)) floatHistogramsInWAL += 4 cbfh := makeCustomBucketFloatHistogram(i) - b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{ + b = enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{ {Ref: 0, T: last, FH: cbfh}, {Ref: 1, T: last + 10000, FH: cbfh}, {Ref: 2, T: last + 20000, FH: cbfh}, @@ -330,14 +330,14 @@ func 
TestCheckpoint(t *testing.T) { require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp") } samplesInCheckpoint += len(samples) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: histograms, err := dec.HistogramSamples(rec, nil) require.NoError(t, err) for _, h := range histograms { require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp") } histogramsInCheckpoint += len(histograms) - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: floatHistograms, err := dec.FloatHistogramSamples(rec, nil) require.NoError(t, err) for _, h := range floatHistograms { diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 07f881eeaf..6f1bc1df35 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -546,7 +546,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } w.writer.AppendExemplars(exemplars) - case record.HistogramSamples, record.HistogramSamplesLegacy: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { break @@ -574,7 +574,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { histogramsToSend = histogramsToSend[:0] } - case record.FloatHistogramSamples, record.FloatHistogramSamplesLegacy: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. 
if !w.sendHistograms { break diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 21490154d9..a793c90a95 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -209,7 +209,7 @@ func TestTailSamples(t *testing.T) { NegativeBuckets: []int64{int64(-i) - 1}, } - histograms := enc.HistogramSamples([]record.RefHistogramSample{{ + histograms, _ := enc.HistogramSamples([]record.RefHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, H: hist, @@ -226,21 +226,21 @@ func TestTailSamples(t *testing.T) { CustomValues: []float64{float64(i) + 2}, } - customBucketHistograms := enc.HistogramSamples([]record.RefHistogramSample{{ + customBucketHistograms := enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, H: customBucketHist, }}, nil) require.NoError(t, w.Log(customBucketHistograms)) - floatHistograms := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ + floatHistograms, _ := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, FH: hist.ToFloat(nil), }}, nil) require.NoError(t, w.Log(floatHistograms)) - customBucketFloatHistograms := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ + customBucketFloatHistograms := enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, FH: customBucketHist.ToFloat(nil), From 1933ccc9be1fcd9ca14bfa653733dde21a59af40 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Fri, 6 Dec 2024 14:55:19 -0800 Subject: [PATCH 1043/1397] Fix test --- tsdb/db_test.go | 110 ++++++++++++++++++++++++------------------------ 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 5024a0cfbb..e67f4821a4 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -6469,7 +6469,7 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, 
name string, scenario Sum: float64(v), PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{v}, - CustomValues: []float64{float64(v)}, + CustomValues: []float64{float64(1), float64(2), float64(3)}, } _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) return err @@ -6482,7 +6482,7 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario Sum: float64(v), PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []float64{float64(v)}, - CustomValues: []float64{float64(v)}, + CustomValues: []float64{float64(1), float64(2), float64(3)}, } _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) return err @@ -6517,29 +6517,29 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario // The expected counter reset hint for each chunk. expectedChunks []expectedChunk }{ - //"counter reset in-order cleared by in-memory OOO chunk": { - // samples: []tsValue{ - // {1, 40}, // New in In-order. I1. - // {4, 30}, // In-order counter reset. I2. - // {2, 40}, // New in OOO. O1. - // {3, 10}, // OOO counter reset. O2. - // }, - // oooCap: 30, - // // Expect all to be set to UnknownCounterReset because we switch between - // // in-order and out-of-order samples. - // expectedSamples: []expectedTsValue{ - // {1, 40, histogram.UnknownCounterReset}, // I1. - // {2, 40, histogram.UnknownCounterReset}, // O1. - // {3, 10, histogram.UnknownCounterReset}, // O2. - // {4, 30, histogram.UnknownCounterReset}, // I2. Counter reset cleared by iterator change. - // }, - // expectedChunks: []expectedChunk{ - // {histogram.UnknownCounterReset, 1}, // I1. - // {histogram.UnknownCounterReset, 1}, // O1. - // {histogram.UnknownCounterReset, 1}, // O2. - // {histogram.UnknownCounterReset, 1}, // I2. - // }, - //}, + "counter reset in-order cleared by in-memory OOO chunk": { + samples: []tsValue{ + {1, 40}, // New in In-order. I1. 
+ {4, 30}, // In-order counter reset. I2. + {2, 40}, // New in OOO. O1. + {3, 10}, // OOO counter reset. O2. + }, + oooCap: 30, + // Expect all to be set to UnknownCounterReset because we switch between + // in-order and out-of-order samples. + expectedSamples: []expectedTsValue{ + {1, 40, histogram.UnknownCounterReset}, // I1. + {2, 40, histogram.UnknownCounterReset}, // O1. + {3, 10, histogram.UnknownCounterReset}, // O2. + {4, 30, histogram.UnknownCounterReset}, // I2. Counter reset cleared by iterator change. + }, + expectedChunks: []expectedChunk{ + {histogram.UnknownCounterReset, 1}, // I1. + {histogram.UnknownCounterReset, 1}, // O1. + {histogram.UnknownCounterReset, 1}, // O2. + {histogram.UnknownCounterReset, 1}, // I2. + }, + }, "counter reset in OOO mmapped chunk cleared by in-memory ooo chunk": { samples: []tsValue{ {8, 30}, // In-order, new chunk. I1. @@ -6570,36 +6570,36 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario {histogram.UnknownCounterReset, 1}, // I1. }, }, - //"counter reset in OOO mmapped chunk cleared by another OOO mmapped chunk": { - // samples: []tsValue{ - // {8, 100}, // In-order, new chunk. I1. - // {1, 50}, // OOO, new chunk (will be mmapped). MO1. - // {5, 40}, // OOO, reset (will be mmapped). MO2. - // {6, 50}, // OOO, no reset (will be mmapped). MO2. - // {2, 10}, // OOO, new chunk no reset (will be mmapped). MO3. - // {3, 20}, // OOO, no reset (will be mmapped). MO3. - // {4, 30}, // OOO, no reset (will be mmapped). MO3. - // {7, 60}, // OOO, no reset in memory. O1. - // }, - // oooCap: 3, - // expectedSamples: []expectedTsValue{ - // {1, 50, histogram.UnknownCounterReset}, // MO1. - // {2, 10, histogram.UnknownCounterReset}, // MO3. - // {3, 20, histogram.NotCounterReset}, // MO3. - // {4, 30, histogram.NotCounterReset}, // MO3. - // {5, 40, histogram.UnknownCounterReset}, // MO2. - // {6, 50, histogram.NotCounterReset}, // MO2. - // {7, 60, histogram.UnknownCounterReset}, // O1. 
- // {8, 100, histogram.UnknownCounterReset}, // I1. - // }, - // expectedChunks: []expectedChunk{ - // {histogram.UnknownCounterReset, 1}, // MO1. - // {histogram.UnknownCounterReset, 3}, // MO3. - // {histogram.UnknownCounterReset, 2}, // MO2. - // {histogram.UnknownCounterReset, 1}, // O1. - // {histogram.UnknownCounterReset, 1}, // I1. - // }, - //}, + "counter reset in OOO mmapped chunk cleared by another OOO mmapped chunk": { + samples: []tsValue{ + {8, 100}, // In-order, new chunk. I1. + {1, 50}, // OOO, new chunk (will be mmapped). MO1. + {5, 40}, // OOO, reset (will be mmapped). MO2. + {6, 50}, // OOO, no reset (will be mmapped). MO2. + {2, 10}, // OOO, new chunk no reset (will be mmapped). MO3. + {3, 20}, // OOO, no reset (will be mmapped). MO3. + {4, 30}, // OOO, no reset (will be mmapped). MO3. + {7, 60}, // OOO, no reset in memory. O1. + }, + oooCap: 3, + expectedSamples: []expectedTsValue{ + {1, 50, histogram.UnknownCounterReset}, // MO1. + {2, 10, histogram.UnknownCounterReset}, // MO3. + {3, 20, histogram.NotCounterReset}, // MO3. + {4, 30, histogram.NotCounterReset}, // MO3. + {5, 40, histogram.UnknownCounterReset}, // MO2. + {6, 50, histogram.NotCounterReset}, // MO2. + {7, 60, histogram.UnknownCounterReset}, // O1. + {8, 100, histogram.UnknownCounterReset}, // I1. + }, + expectedChunks: []expectedChunk{ + {histogram.UnknownCounterReset, 1}, // MO1. + {histogram.UnknownCounterReset, 3}, // MO3. + {histogram.UnknownCounterReset, 2}, // MO2. + {histogram.UnknownCounterReset, 1}, // O1. + {histogram.UnknownCounterReset, 1}, // I1. 
+ }, + }, } for tcName, tc := range cases { From 5479fb3cfd92f5f34e71b465c5caed5439ca7bc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 9 Dec 2024 08:17:32 +0200 Subject: [PATCH 1044/1397] scrape: update test after review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Giedrius Statkevičius --- scrape/scrape_test.go | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index e8494e2db1..9e1bd81d69 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -4795,19 +4795,12 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha })), scrapedTwice } -type srvSameResp struct { - resp []byte -} - -func (s *srvSameResp) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Write(s.resp) -} - -func TestTargetScraperSegfault(t *testing.T) { - emptySrv := &srvSameResp{ - resp: []byte{0x42, 0x42}, - } - h := httptest.NewServer(emptySrv) +func TestTargetScraperSetsMetricsField(t *testing.T) { + h := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte{0x42, 0x42}) + }, + )) t.Cleanup(h.Close) cfg := &config.ScrapeConfig{ @@ -4826,15 +4819,7 @@ func TestTargetScraperSegfault(t *testing.T) { }, } - p, err := newScrapePool( - cfg, - &nopAppendable{}, - 0, - nil, - pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), - &Options{}, - newTestScrapeMetrics(t), - ) + p, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) require.NoError(t, err) t.Cleanup(p.stop) From eeef17ea0ab276cc8a651f344c42d3dd1e75431b Mon Sep 17 00:00:00 2001 From: bwplotka Date: Mon, 9 Dec 2024 11:45:46 +0000 Subject: [PATCH 1045/1397] docs: Added native histogram WAL record documentation. 
Signed-off-by: bwplotka --- tsdb/docs/format/wal.md | 136 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 123 insertions(+), 13 deletions(-) diff --git a/tsdb/docs/format/wal.md b/tsdb/docs/format/wal.md index db1ce97a8b..5d93eb6d87 100644 --- a/tsdb/docs/format/wal.md +++ b/tsdb/docs/format/wal.md @@ -1,15 +1,32 @@ # WAL Disk Format +This document describes the official Prometheus WAL format. + The write ahead log operates in segments that are numbered and sequential, -e.g. `000000`, `000001`, `000002`, etc., and are limited to 128MB by default. -A segment is written to in pages of 32KB. Only the last page of the most recent segment +and are limited to 128MB by default. + +## Segment filename + +The sequence number is captured in the segment filename, +e.g. `000000`, `000001`, `000002`, etc. The first unsigned integer represents +the sequence number of the segment, typically encoded with six digits. + +## Segment encoding + +This section describes the segment encoding. + +A segment encodes an array of records. It does not contain any header. A segment +is written to pages of 32KB. Only the last page of the most recent segment may be partial. A WAL record is an opaque byte slice that gets split up into sub-records should it exceed the remaining space of the current page. Records are never split across segment boundaries. If a single record exceeds the default segment size, a segment with a larger size will be created. 
+ The encoding of pages is largely borrowed from [LevelDB's/RocksDB's write ahead log.](https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log-File-Format) -Notable deviations are that the record fragment is encoded as: +### Records encoding + +Each record fragment is encoded as: ``` ┌───────────┬──────────┬────────────┬──────────────┐ @@ -17,7 +34,8 @@ Notable deviations are that the record fragment is encoded as: └───────────┴──────────┴────────────┴──────────────┘ ``` -The initial type byte is made up of three components: a 3-bit reserved field, a 1-bit zstd compression flag, a 1-bit snappy compression flag, and a 3-bit type flag. +The initial type byte is made up of three components: a 3-bit reserved field, +a 1-bit zstd compression flag, a 1-bit snappy compression flag, and a 3-bit type flag. ``` ┌─────────────────┬──────────────────┬────────────────────┬──────────────────┐ @@ -25,7 +43,7 @@ The initial type byte is made up of three components: a 3-bit reserved field, a └─────────────────┴──────────────────┴────────────────────┴──────────────────┘ ``` -The lowest 3 bits within this flag represent the record type as follows: +The lowest 3 bits within the type flag represent the record type as follows: * `0`: rest of page will be empty * `1`: a full record encoded in a single fragment @@ -33,11 +51,16 @@ The lowest 3 bits within this flag represent the record type as follows: * `3`: middle fragment of a record * `4`: final fragment of a record -## Record encoding +After the type byte, 2-byte length and then 4-byte checksum of the following data are encoded. -The records written to the write ahead log are encoded as follows: +All float values are represented using the [IEEE 754 format](https://en.wikipedia.org/wiki/IEEE_754). -### Series records +### Record types + +In the following sections, all the known record types are described. New types, +can be added in the future. 
+ +#### Series records Series records encode the labels that identifies a series and its unique ID. @@ -58,7 +81,7 @@ Series records encode the labels that identifies a series and its unique ID. └────────────────────────────────────────────┘ ``` -### Sample records +#### Sample records Sample records encode samples as a list of triples `(series_id, timestamp, value)`. Series reference and timestamp are encoded as deltas w.r.t the first sample. @@ -79,7 +102,7 @@ The first sample record begins at the second row. └──────────────────────────────────────────────────────────────────┘ ``` -### Tombstone records +#### Tombstone records Tombstone records encode tombstones as a list of triples `(series_id, min_time, max_time)` and specify an interval for which samples of a series got deleted. @@ -95,9 +118,9 @@ and specify an interval for which samples of a series got deleted. └─────────────────────────────────────────────────────┘ ``` -### Exemplar records +#### Exemplar records -Exemplar records encode exemplars as a list of triples `(series_id, timestamp, value)` +Exemplar records encode exemplars as a list of triples `(series_id, timestamp, value)` plus the length of the labels list, and all the labels. The first row stores the starting id and the starting timestamp. Series reference and timestamp are encoded as deltas w.r.t the first exemplar. @@ -127,7 +150,7 @@ See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/Op └──────────────────────────────────────────────────────────────────┘ ``` -### Metadata records +#### Metadata records Metadata records encode the metadata updates associated with a series. @@ -156,3 +179,90 @@ Metadata records encode the metadata updates associated with a series. └────────────────────────────────────────────┘ ``` +#### Histogram records + +Histogram records encode the integer and float native histogram samples. 
+ +A record with the integer native histograms with the exponential bucketing: + +``` +┌───────────────────────────────────────────────────────────────────────┐ +│ type = 7 <1b> │ +├───────────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬──────────────────────────────────────────────┐ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴────┬─────────────────────────────────────────┤ │ +│ │ counter_reset_hint <1b> │ schema │ │ +│ ├─────────────────────────┴────┬────────────────────────────────────┤ │ +│ │ zero_threshold (float) <8b> │ zero_count │ │ +│ ├─────────────────┬────────────┴────────────────────────────────────┤ │ +│ │ count │ sum (float) <8b> │ │ +│ ├─────────────────┴─────────────────────────────────────────────────┤ │ +│ │ positive_spans_num │ │ +│ ├─────────────────────────────────┬─────────────────────────────────┤ │ +│ │ positive_span_offset_1 │ positive_span_len_1 │ │ +│ ├─────────────────────────────────┴─────────────────────────────────┤ │ +│ │ . . . │ │ +│ ├───────────────────────────────────────────────────────────────────┤ │ +│ │ negative_spans_num │ │ +│ ├───────────────────────────────┬───────────────────────────────────┤ │ +│ │ negative_span_offset │ negative_span_len │ │ +│ ├───────────────────────────────┴───────────────────────────────────┤ │ +│ │ . . . │ │ +│ ├───────────────────────────────────────────────────────────────────┤ │ +│ │ positive_bkts_num │ │ +│ ├─────────────────────────┬───────┬─────────────────────────────────┤ │ +│ │ positive_bkt_1 │ . . . │ positive_bkt_n │ │ +│ ├─────────────────────────┴───────┴─────────────────────────────────┤ │ +│ │ negative_bkts_num │ │ +│ ├─────────────────────────┬───────┬─────────────────────────────────┤ │ +│ │ negative_bkt_1 │ . . . 
│ negative_bkt_n │ │ +│ └─────────────────────────┴───────┴─────────────────────────────────┘ │ +│ . . . │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +A records with the Float histograms: + +``` +┌───────────────────────────────────────────────────────────────────────┐ +│ type = 8 <1b> │ +├───────────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬──────────────────────────────────────────────┐ │ +│ │ id_delta │ timestamp_delta │ │ +│ ├────────────────────┴────┬─────────────────────────────────────────┤ │ +│ │ counter_reset_hint <1b> │ schema │ │ +│ ├─────────────────────────┴────┬────────────────────────────────────┤ │ +│ │ zero_threshold (float) <8b> │ zero_count (float) <8b> │ │ +│ ├────────────────────┬─────────┴────────────────────────────────────┤ │ +│ │ count (float) <8b> │ sum (float) <8b> │ │ +│ ├────────────────────┴──────────────────────────────────────────────┤ │ +│ │ positive_spans_num │ │ +│ ├─────────────────────────────────┬─────────────────────────────────┤ │ +│ │ positive_span_offset_1 │ positive_span_len_1 │ │ +│ ├─────────────────────────────────┴─────────────────────────────────┤ │ +│ │ . . . │ │ +│ ├───────────────────────────────────────────────────────────────────┤ │ +│ │ negative_spans_num │ │ +│ ├───────────────────────────────┬───────────────────────────────────┤ │ +│ │ negative_span_offset │ negative_span_len │ │ +│ ├───────────────────────────────┴───────────────────────────────────┤ │ +│ │ . . . │ │ +│ ├───────────────────────────────────────────────────────────────────┤ │ +│ │ positive_bkts_num │ │ +│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │ +│ │ positive_bkt_1 (float) <8b> │ . . . 
│ positive_bkt_n (float) <8b> │ │ +│ ├─────────────────────────────┴───────┴─────────────────────────────┤ │ +│ │ negative_bkts_num │ │ +│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │ +│ │ negative_bkt_1 (float) <8b> │ . . . │ negative_bkt_n (float) <8b> │ │ +│ └─────────────────────────────┴───────┴─────────────────────────────┘ │ +│ . . . │ +└───────────────────────────────────────────────────────────────────────┘ +``` From fe4e8170074940cf4249e03fa3cfa59d66c8653d Mon Sep 17 00:00:00 2001 From: avoidaway Date: Mon, 9 Dec 2024 21:07:47 +0800 Subject: [PATCH 1046/1397] chore: fix some function names in comment Signed-off-by: avoidaway --- storage/remote/queue_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index f6d6cbc7e9..475c126eff 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1688,7 +1688,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl s.enqueuedHistograms.Sub(int64(histogramCount)) } -// sendSamples to the remote storage with backoff for recoverable errors. +// sendSamplesWithBackoff to the remote storage with backoff for recoverable errors. func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc) @@ -1802,7 +1802,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti return accumulatedStats, err } -// sendV2Samples to the remote storage with backoff for recoverable errors. +// sendV2SamplesWithBackoff to the remote storage with backoff for recoverable errors. 
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc) From 5d76510bd6194fdf0a027749d2fa40ba46e541fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Mon, 9 Dec 2024 17:36:36 +0200 Subject: [PATCH 1047/1397] scrape: update test name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Giedrius Statkevičius --- scrape/scrape_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 9e1bd81d69..b4a1ab7b9f 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -4795,7 +4795,8 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha })), scrapedTwice } -func TestTargetScraperSetsMetricsField(t *testing.T) { +// Regression test for the panic fixed in https://github.com/prometheus/prometheus/pull/15523. 
+func TestScrapePoolScrapeAfterReload(t *testing.T) { h := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.Write([]byte{0x42, 0x42}) From 0a7729469d90fa2d2fa74b156052d2be5c89480e Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Thu, 5 Dec 2024 16:31:45 +0100 Subject: [PATCH 1048/1397] RELEASE.md: schedule 3.[1,2,3,4] releases Signed-off-by: Jan Fajerski --- RELEASE.md | 64 ++++++++---------------------------------------------- 1 file changed, 9 insertions(+), 55 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 8e78a6a3ec..b481c89c30 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -5,61 +5,15 @@ This page describes the release process and the currently planned schedule for u ## Release schedule Release cadence of first pre-releases being cut is 6 weeks. +Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/release-2.55/RELEASE.md) for the v2 release series schedule. -| release series | date of first pre-release (year-month-day) | release shepherd | -|----------------|--------------------------------------------|---------------------------------------------| -| v2.4 | 2018-09-06 | Goutham Veeramachaneni (GitHub: @gouthamve) | -| v2.5 | 2018-10-24 | Frederic Branczyk (GitHub: @brancz) | -| v2.6 | 2018-12-05 | Simon Pasquier (GitHub: @simonpasquier) | -| v2.7 | 2019-01-16 | Goutham Veeramachaneni (GitHub: @gouthamve) | -| v2.8 | 2019-02-27 | Ganesh Vernekar (GitHub: @codesome) | -| v2.9 | 2019-04-10 | Brian Brazil (GitHub: @brian-brazil) | -| v2.10 | 2019-05-22 | Björn Rabenstein (GitHub: @beorn7) | -| v2.11 | 2019-07-03 | Frederic Branczyk (GitHub: @brancz) | -| v2.12 | 2019-08-14 | Julius Volz (GitHub: @juliusv) | -| v2.13 | 2019-09-25 | Krasi Georgiev (GitHub: @krasi-georgiev) | -| v2.14 | 2019-11-06 | Chris Marchbanks (GitHub: @csmarchbanks) | -| v2.15 | 2019-12-18 | Bartek Plotka (GitHub: @bwplotka) | -| v2.16 | 2020-01-29 | Callum Styan (GitHub: @cstyan) | -| v2.17 | 2020-03-11 | Julien Pivotto 
(GitHub: @roidelapluie) | -| v2.18 | 2020-04-22 | Bartek Plotka (GitHub: @bwplotka) | -| v2.19 | 2020-06-03 | Ganesh Vernekar (GitHub: @codesome) | -| v2.20 | 2020-07-15 | Björn Rabenstein (GitHub: @beorn7) | -| v2.21 | 2020-08-26 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.22 | 2020-10-07 | Frederic Branczyk (GitHub: @brancz) | -| v2.23 | 2020-11-18 | Ganesh Vernekar (GitHub: @codesome) | -| v2.24 | 2020-12-30 | Björn Rabenstein (GitHub: @beorn7) | -| v2.25 | 2021-02-10 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.26 | 2021-03-24 | Bartek Plotka (GitHub: @bwplotka) | -| v2.27 | 2021-05-05 | Chris Marchbanks (GitHub: @csmarchbanks) | -| v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) | -| v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) | -| v2.30 | 2021-09-08 | Ganesh Vernekar (GitHub: @codesome) | -| v2.31 | 2021-10-20 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.32 | 2021-12-01 | Julius Volz (GitHub: @juliusv) | -| v2.33 | 2022-01-12 | Björn Rabenstein (GitHub: @beorn7) | -| v2.34 | 2022-02-23 | Chris Marchbanks (GitHub: @csmarchbanks) | -| v2.35 | 2022-04-06 | Augustin Husson (GitHub: @nexucis) | -| v2.36 | 2022-05-18 | Matthias Loibl (GitHub: @metalmatze) | -| v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.38 | 2022-08-10 | Julius Volz (GitHub: @juliusv) | -| v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) | -| v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) | -| v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | -| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | -| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | -| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | -| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) | -| v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) | -| v2.49 | 2023-12-05 | Bartek 
Plotka (GitHub: @bwplotka) | -| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) | -| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) | -| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) | -| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) | -| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) | -| v2.55 | 2024-09-17 | Bryan Boreham (GitHub: @bboreham) | +| release series | date of first pre-release (year-month-day) | release shepherd | +|----------------|--------------------------------------------|-----------------------------------| +| v3.0 | 2024-11-14 | Jan Fajerski (GitHub: @jan--f) | +| v3.1 | 2024-12-17 | Bryan Boreham (GitHub: @bboreham) | +| v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) | +| v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) | +| v3.4 | 2025-04-22 | **volunteer welcome** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. @@ -204,7 +158,7 @@ Then release with `git tag-release`. Signing a tag with a GPG key is appreciated, but in case you can't add a GPG key to your Github account using the following [procedure](https://help.github.com/articles/generating-a-gpg-key/), you can replace the `-s` flag by `-a` flag of the `git tag` command to only annotate the tag without signing. -Once a tag is created, the release process through CircleCI will be triggered for this tag and Circle CI will draft the GitHub release using the `prombot` account. +Once a tag is created, the release process through Github Actions will be triggered for this tag and Github Actions will draft the GitHub release using the `prombot` account. Finally, wait for the build step for the tag to finish. The point here is to wait for tarballs to be uploaded to the Github release and the container images to be pushed to the Docker Hub and Quay.io. 
Once that has happened, click _Publish release_, which will make the release publicly visible and create a GitHub notification. **Note:** for a release candidate version ensure the _This is a pre-release_ box is checked when drafting the release in the Github UI. The CI job should take care of this but it's a good idea to double check before clicking _Publish release_.` From 437362a7a789a803b970238fce46e2cdc20319e2 Mon Sep 17 00:00:00 2001 From: TJ Hoplock Date: Tue, 10 Dec 2024 02:24:20 -0500 Subject: [PATCH 1049/1397] fix(deduper): use ptr to sync.RWMutex, fix panic during concurrent use Resolves: #15559 As accurately noted in the issue description, the map is shared among child loggers that get created when `WithAttr()`/`WithGroup()` are called on the underlying handler, which happens via `log.With()` and `log.WithGroup()` respectively. The RW mutex was a value in the previous implementation that used go-kit/log, and I should've updated it to use a pointer when I converted the deduper. Also adds a test. 
Signed-off-by: TJ Hoplock --- util/logging/dedupe.go | 5 ++++- util/logging/dedupe_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go index e7dff20f78..8137f4f22b 100644 --- a/util/logging/dedupe.go +++ b/util/logging/dedupe.go @@ -33,7 +33,7 @@ type Deduper struct { next *slog.Logger repeat time.Duration quit chan struct{} - mtx sync.RWMutex + mtx *sync.RWMutex seen map[string]time.Time } @@ -43,6 +43,7 @@ func Dedupe(next *slog.Logger, repeat time.Duration) *Deduper { next: next, repeat: repeat, quit: make(chan struct{}), + mtx: new(sync.RWMutex), seen: map[string]time.Time{}, } go d.run() @@ -88,6 +89,7 @@ func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { repeat: d.repeat, quit: d.quit, seen: d.seen, + mtx: d.mtx, } } @@ -103,6 +105,7 @@ func (d *Deduper) WithGroup(name string) slog.Handler { repeat: d.repeat, quit: d.quit, seen: d.seen, + mtx: d.mtx, } } diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go index 5baa90b038..a8774aefd3 100644 --- a/util/logging/dedupe_test.go +++ b/util/logging/dedupe_test.go @@ -56,3 +56,27 @@ func TestDedupe(t *testing.T) { } require.Len(t, lines, 2) } + +func TestDedupeConcurrent(t *testing.T) { + d := Dedupe(promslog.New(&promslog.Config{}), 250*time.Millisecond) + dlog := slog.New(d) + defer d.Stop() + + concurrentWriteFunc := func() { + go func() { + dlog1 := dlog.With("writer", 1) + for i := 0; i < 10; i++ { + dlog1.With("foo", "bar").Info("test", "hello", "world") + } + }() + + go func() { + dlog2 := dlog.With("writer", 2) + for i := 0; i < 10; i++ { + dlog2.With("foo", "bar").Info("test", "hello", "world") + } + }() + } + + require.NotPanics(t, func() { concurrentWriteFunc() }) +} From b94c87bea627613d60fdf50e0084737abab4c5e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 10 Dec 2024 16:16:46 +0100 Subject: [PATCH 1050/1397] fix(test): TestCheckpoint segment size 
too low MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The segment size was too low for the additional NHCB data, thus it created more segments then expected. This meant that less were in the lower numbered segments, which meant more was kept. FAIL: TestCheckpoint (4.05s) FAIL: TestCheckpoint/compress=none (0.22s) checkpoint_test.go:361: Error Trace: /home/krajo/go/github.com/prometheus/prometheus/tsdb/wlog/checkpoint_test.go:361 Error: "0.8586956521739131" is not less than "0.8" Test: TestCheckpoint/compress=none Signed-off-by: György Krajcsovits --- tsdb/wlog/checkpoint_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 873513c4ec..a052de9258 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -195,7 +195,7 @@ func TestCheckpoint(t *testing.T) { require.NoError(t, w.Close()) // Start a WAL and write records to it as usual. - w, err = NewSize(nil, nil, dir, 64*1024, compress) + w, err = NewSize(nil, nil, dir, 128*1024, compress) require.NoError(t, err) samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0 From 8f572fe905c5fa9fcae8edad9b5300e18592887a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 10 Dec 2024 16:25:20 +0100 Subject: [PATCH 1051/1397] fix(lint): linter errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/record/record_test.go | 1 + tsdb/wlog/checkpoint.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 030b7e2bc7..e26f964072 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -216,6 +216,7 @@ func TestRecord_EncodeDecode(t *testing.T) { decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeFloatHistSamples, nil) require.NoError(t, err) 
decCustomBucketsGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketsGaugeFloatHistSamples, nil) + require.NoError(t, err) decGaugeFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketsGaugeFloatHistograms...) require.Equal(t, floatHistograms, decGaugeFloatHistograms) } diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 63a7737b3a..45c506e802 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -227,6 +227,9 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He stats.DroppedSamples += len(histogramSamples) - len(repl) case record.CustomBucketsHistogramSamples: histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) + if err != nil { + return nil, fmt.Errorf("decode histogram samples: %w", err) + } // Drop irrelevant histogramSamples in place. repl := histogramSamples[:0] for _, h := range histogramSamples { From 07276aeece7b19ec637a5109754d01d318941bd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 10 Dec 2024 16:25:50 +0100 Subject: [PATCH 1052/1397] fix(test): if we are dereferencing a slice we should check its len MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/agent/db_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index c81c63f739..0840cebe5c 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -631,6 +631,7 @@ func TestPartialTruncateWAL(t *testing.T) { s.truncate(lastTs - 1) m := gatherFamily(t, reg, "prometheus_agent_deleted_series") + require.Len(t, m.Metric, 1) require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count") } From f61e66a2b1e7b31253d5c62f3133e43b603277fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:49:27 +0100 
Subject: [PATCH 1053/1397] chore(deps-dev): bump globals from 15.10.0 to 15.13.0 in /web/ui (#15522) Bumps [globals](https://github.com/sindresorhus/globals) from 15.10.0 to 15.13.0. - [Release notes](https://github.com/sindresorhus/globals/releases) - [Commits](https://github.com/sindresorhus/globals/compare/v15.10.0...v15.13.0) --- updated-dependencies: - dependency-name: globals dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index dc32eee96d..7476bd2cc4 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -62,7 +62,7 @@ "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", - "globals": "^15.10.0", + "globals": "^15.13.0", "jsdom": "^25.0.1", "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 8a0df78d5b..115f7a0924 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -76,7 +76,7 @@ "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", - "globals": "^15.10.0", + "globals": "^15.13.0", "jsdom": "^25.0.1", "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", @@ -5340,9 +5340,9 @@ } }, "node_modules/globals": { - "version": "15.10.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-15.10.0.tgz", - "integrity": "sha512-tqFIbz83w4Y5TCbtgjZjApohbuh7K9BxGYFm7ifwDR240tvdb7P9x+/9VvUKlmkPoiknoJtanI8UOrqxS3a7lQ==", + "version": "15.13.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.13.0.tgz", + "integrity": 
"sha512-49TewVEz0UxZjr1WYYsWpPrhyC/B/pA8Bq0fUmet2n+eR7yn0IvNzNaoBwnK6mdkzcN+se7Ez9zUgULTz2QH4g==", "dev": true, "engines": { "node": ">=18" From 35047db9db56026dacf0d96315eb1afff6e70358 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:50:21 +0100 Subject: [PATCH 1054/1397] chore(deps): bump @tanstack/react-query from 5.59.0 to 5.62.0 in /web/ui (#15521) Bumps [@tanstack/react-query](https://github.com/TanStack/query/tree/HEAD/packages/react-query) from 5.59.0 to 5.62.0. - [Release notes](https://github.com/TanStack/query/releases) - [Commits](https://github.com/TanStack/query/commits/v5.62.0/packages/react-query) --- updated-dependencies: - dependency-name: "@tanstack/react-query" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 7476bd2cc4..449f1aa4f8 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -31,7 +31,7 @@ "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", - "@tanstack/react-query": "^5.59.0", + "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 115f7a0924..8ec5ae5db0 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -45,7 +45,7 @@ "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.2.1", "@tabler/icons-react": "^3.19.0", - "@tanstack/react-query": "^5.59.0", + "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": 
"^6.5.0", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.9", @@ -2723,20 +2723,20 @@ } }, "node_modules/@tanstack/query-core": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.59.0.tgz", - "integrity": "sha512-WGD8uIhX6/deH/tkZqPNcRyAhDUqs729bWKoByYHSogcshXfFbppOdTER5+qY7mFvu8KEFJwT0nxr8RfPTVh0Q==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.62.0.tgz", + "integrity": "sha512-sx38bGrqF9bop92AXOvzDr0L9fWDas5zXdPglxa9cuqeVSWS7lY6OnVyl/oodfXjgOGRk79IfCpgVmxrbHuFHg==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@tanstack/react-query": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.59.0.tgz", - "integrity": "sha512-YDXp3OORbYR+8HNQx+lf4F73NoiCmCcSvZvgxE29OifmQFk0sBlO26NWLHpcNERo92tVk3w+JQ53/vkcRUY1hA==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.62.0.tgz", + "integrity": "sha512-tj2ltjAn2a3fs+Dqonlvs6GyLQ/LKVJE2DVSYW+8pJ3P6/VCVGrfqv5UEchmlP7tLOvvtZcOuSyI2ooVlR5Yqw==", "dependencies": { - "@tanstack/query-core": "5.59.0" + "@tanstack/query-core": "5.62.0" }, "funding": { "type": "github", From 7d4096d38e5750ffb2450b9cfcf817f8fc09912e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:52:11 +0100 Subject: [PATCH 1055/1397] chore(deps): bump @codemirror/language from 6.10.2 to 6.10.6 in /web/ui (#15519) Signed-off-by: Ben Kochie --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 449f1aa4f8..fc7af52fbc 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -13,7 
+13,7 @@ }, "dependencies": { "@codemirror/autocomplete": "^6.18.1", - "@codemirror/language": "^6.10.2", + "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 8fb6dc4ba7..4ae6eb50b4 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -34,7 +34,7 @@ }, "devDependencies": { "@codemirror/autocomplete": "^6.18.1", - "@codemirror/language": "^6.10.2", + "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.34.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 8ec5ae5db0..6e4a2cb1fb 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -27,7 +27,7 @@ "version": "0.300.0", "dependencies": { "@codemirror/autocomplete": "^6.18.1", - "@codemirror/language": "^6.10.2", + "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", @@ -157,7 +157,7 @@ }, "devDependencies": { "@codemirror/autocomplete": "^6.18.1", - "@codemirror/language": "^6.10.2", + "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.34.1", @@ -931,10 +931,9 @@ } }, "node_modules/@codemirror/language": { - "version": "6.10.2", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.2.tgz", - "integrity": "sha512-kgbTYTo0Au6dCSc/TFy7fK3fpJmgHDv1sG1KNQKJXVi+xBTEeBPY/M30YXiU6mMXeH+YIDLsbrT4ZwNRdtF+SA==", - "license": "MIT", + "version": "6.10.6", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.6.tgz", + "integrity": "sha512-KrsbdCnxEztLVbB5PycWXFxas4EOyk/fPAfruSOnDDppevQgid2XZ+KbJ9u+fDikP/e7MW7HPBTvTb8JlZK9vA==", "dependencies": { "@codemirror/state": "^6.0.0", 
"@codemirror/view": "^6.23.0", From 6539d2ccbc10814fb1b6b1295f90226f16942ed8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:52:58 +0100 Subject: [PATCH 1056/1397] chore(deps-dev): bump @eslint/js from 9.11.1 to 9.16.0 in /web/ui (#15518) Bumps [@eslint/js](https://github.com/eslint/eslint/tree/HEAD/packages/js) from 9.11.1 to 9.16.0. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/commits/v9.16.0/packages/js) --- updated-dependencies: - dependency-name: "@eslint/js" dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index fc7af52fbc..9c7b1548f8 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -53,7 +53,7 @@ "devDependencies": { "@eslint/compat": "^1.1.1", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "^9.11.1", + "@eslint/js": "^9.16.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 6e4a2cb1fb..3e7956b5e8 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -67,7 +67,7 @@ "devDependencies": { "@eslint/compat": "^1.1.1", "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "^9.11.1", + "@eslint/js": "^9.16.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -147,6 +147,15 @@ } } }, + "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { + "version": "9.11.1", + 
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", + "integrity": "sha512-/qu+TWz8WwPWc7/HcIJKi+c+MOm46GdVaSlTTQcaqaL53+GsoA6MxWp5PtTx48qbSP7ylM1Kn7nhvkugfJvRSA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.300.0", @@ -1493,9 +1502,9 @@ } }, "node_modules/@eslint/js": { - "version": "9.11.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", - "integrity": "sha512-/qu+TWz8WwPWc7/HcIJKi+c+MOm46GdVaSlTTQcaqaL53+GsoA6MxWp5PtTx48qbSP7ylM1Kn7nhvkugfJvRSA==", + "version": "9.16.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", + "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" From 2fe71b4d0955513f810faa5a5312cc7fc80db224 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:02:10 +0100 Subject: [PATCH 1057/1397] chore(deps-dev): bump @vitejs/plugin-react in /web/ui (#15517) Bumps [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/tree/HEAD/packages/plugin-react) from 4.3.1 to 4.3.4. - [Release notes](https://github.com/vitejs/vite-plugin-react/releases) - [Changelog](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/CHANGELOG.md) - [Commits](https://github.com/vitejs/vite-plugin-react/commits/v4.3.4/packages/plugin-react) --- updated-dependencies: - dependency-name: "@vitejs/plugin-react" dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 387 +++++++++++---------------------- 2 files changed, 125 insertions(+), 264 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 9c7b1548f8..b53ff1d18e 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -58,7 +58,7 @@ "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", - "@vitejs/plugin-react": "^4.2.1", + "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 3e7956b5e8..4d8a7bcffa 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -72,7 +72,7 @@ "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", - "@vitejs/plugin-react": "^4.2.1", + "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.11.1", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-refresh": "^0.4.12", @@ -225,12 +225,12 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", - "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", - "license": "MIT", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "dependencies": { - "@babel/highlight": "^7.24.7", + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", "picocolors": "^1.0.0" }, "engines": { @@ -238,32 
+238,30 @@ } }, "node_modules/@babel/compat-data": { - "version": "7.25.4", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.25.4.tgz", - "integrity": "sha512-+LGRog6RAsCJrrrg/IO6LGmpphNe5DiK30dGjCoxxeGv49B10/3XYGxPsAwrDlMFcFEvdAUavDT8r9k/hSyQqQ==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.2.tgz", + "integrity": "sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.25.2", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.25.2.tgz", - "integrity": "sha512-BBt3opiCOxUr9euZ5/ro/Xv8/V7yJ5bjYMqG/C1YAo8MIKAnumZalCN+msbci3Pigy4lIQfPUpfMM27HMGaYEA==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz", + "integrity": "sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==", "dev": true, - "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.24.7", - "@babel/generator": "^7.25.0", - "@babel/helper-compilation-targets": "^7.25.2", - "@babel/helper-module-transforms": "^7.25.2", - "@babel/helpers": "^7.25.0", - "@babel/parser": "^7.25.0", - "@babel/template": "^7.25.0", - "@babel/traverse": "^7.25.2", - "@babel/types": "^7.25.2", + "@babel/code-frame": "^7.26.0", + "@babel/generator": "^7.26.0", + "@babel/helper-compilation-targets": "^7.25.9", + "@babel/helper-module-transforms": "^7.26.0", + "@babel/helpers": "^7.26.0", + "@babel/parser": "^7.26.0", + "@babel/template": "^7.25.9", + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.26.0", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -289,31 +287,30 @@ } }, "node_modules/@babel/generator": { - "version": "7.25.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.25.6.tgz", - 
"integrity": "sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz", + "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.25.6", + "@babel/parser": "^7.26.2", + "@babel/types": "^7.26.0", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", - "jsesc": "^2.5.1" + "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.25.2", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.2.tgz", - "integrity": "sha512-U2U5LsSaZ7TAt3cfaymQ8WHh0pxvdHoEk6HVpaexxixjyEquMh0L0YNJNM6CTGKMXV1iksi0iZkGw4AcFkPaaw==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz", + "integrity": "sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.25.2", - "@babel/helper-validator-option": "^7.24.8", - "browserslist": "^4.23.1", + "@babel/compat-data": "^7.25.9", + "@babel/helper-validator-option": "^7.25.9", + "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" }, @@ -326,7 +323,6 @@ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, - "license": "ISC", "dependencies": { "yallist": "^3.0.2" } @@ -336,36 +332,32 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": 
true, - "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/helper-module-imports": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", - "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", + "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.25.2", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.25.2.tgz", - "integrity": "sha512-BjyRAbix6j/wv83ftcVJmBt72QtHI56C7JXZoG2xATiLpmoC7dpd8WnkikExHDVPpi/3qCmO6WY1EaXOluiecQ==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", + "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-module-imports": "^7.24.7", - "@babel/helper-simple-access": "^7.24.7", - "@babel/helper-validator-identifier": "^7.24.7", - "@babel/traverse": "^7.25.2" + "@babel/helper-module-imports": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -375,166 +367,60 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.24.8", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", - "integrity": 
"sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz", + "integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==", "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", - "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@babel/traverse": "^7.24.7", - "@babel/types": "^7.24.7" - }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.24.8", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz", - "integrity": "sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", - "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", - "license": "MIT", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": 
"sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.24.8", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.8.tgz", - "integrity": "sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", + "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", "dev": true, - "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.25.6", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.25.6.tgz", - "integrity": "sha512-Xg0tn4HcfTijTwfDwYlvVCl43V6h4KyVVX2aEm4qdO/PC6L2YvzLHFdmxhoeSA3eslcE6+ZVXHgWwopXYLNq4Q==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz", + "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/template": "^7.25.0", - "@babel/types": "^7.25.6" + "@babel/template": "^7.25.9", + "@babel/types": "^7.26.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/highlight": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", - "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", - "license": "MIT", - "dependencies": { - "@babel/helper-validator-identifier": "^7.24.7", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "license": "MIT", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "license": "MIT", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "license": "MIT" - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": 
"sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "license": "MIT", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/parser": { - "version": "7.25.6", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.25.6.tgz", - "integrity": "sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/types": "^7.25.6" + "@babel/types": "^7.26.0" }, "bin": { "parser": "bin/babel-parser.js" @@ -800,13 +686,12 @@ } }, "node_modules/@babel/plugin-transform-react-jsx-self": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.24.7.tgz", - "integrity": "sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.25.9.tgz", + "integrity": "sha512-y8quW6p0WHkEhmErnfe58r7x0A70uKphQm8Sp8cV7tjNQwK56sNVK0M73LK3WuYmsuyrftut4xAkjjgU0twaMg==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" + "@babel/helper-plugin-utils": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -816,13 +701,12 @@ } }, 
"node_modules/@babel/plugin-transform-react-jsx-source": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.24.7.tgz", - "integrity": "sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.25.9.tgz", + "integrity": "sha512-+iqjT8xmXhhYv4/uiYd8FNQsraMFZIfxVSqxxVSZP0WbbSAWvBXAul0m/zu+7Vv4O/3WtApy9pmaTMiumEZgfg==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" + "@babel/helper-plugin-utils": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -844,32 +728,30 @@ } }, "node_modules/@babel/template": { - "version": "7.25.0", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.0.tgz", - "integrity": "sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.9.tgz", + "integrity": "sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.24.7", - "@babel/parser": "^7.25.0", - "@babel/types": "^7.25.0" + "@babel/code-frame": "^7.25.9", + "@babel/parser": "^7.25.9", + "@babel/types": "^7.25.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.25.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.6.tgz", - "integrity": "sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.9.tgz", + "integrity": 
"sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.24.7", - "@babel/generator": "^7.25.6", - "@babel/parser": "^7.25.6", - "@babel/template": "^7.25.0", - "@babel/types": "^7.25.6", + "@babel/code-frame": "^7.25.9", + "@babel/generator": "^7.25.9", + "@babel/parser": "^7.25.9", + "@babel/template": "^7.25.9", + "@babel/types": "^7.25.9", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -882,21 +764,18 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true, - "license": "MIT", "engines": { "node": ">=4" } }, "node_modules/@babel/types": { - "version": "7.25.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.25.6.tgz", - "integrity": "sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==", + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz", + "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.24.8", - "@babel/helper-validator-identifier": "^7.24.7", - "to-fast-properties": "^2.0.0" + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -3375,15 +3254,14 @@ "peer": true }, "node_modules/@vitejs/plugin-react": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.3.1.tgz", - "integrity": "sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==", + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.3.4.tgz", + "integrity": 
"sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug==", "dev": true, - "license": "MIT", "dependencies": { - "@babel/core": "^7.24.5", - "@babel/plugin-transform-react-jsx-self": "^7.24.5", - "@babel/plugin-transform-react-jsx-source": "^7.24.1", + "@babel/core": "^7.26.0", + "@babel/plugin-transform-react-jsx-self": "^7.25.9", + "@babel/plugin-transform-react-jsx-source": "^7.25.9", "@types/babel__core": "^7.20.5", "react-refresh": "^0.14.2" }, @@ -3391,7 +3269,7 @@ "node": "^14.18.0 || >=16.0.0" }, "peerDependencies": { - "vite": "^4.2.0 || ^5.0.0" + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0" } }, "node_modules/@vitest/expect": { @@ -3838,9 +3716,9 @@ } }, "node_modules/browserslist": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", - "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", + "version": "4.24.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", + "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==", "dev": true, "funding": [ { @@ -3856,12 +3734,11 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001646", - "electron-to-chromium": "^1.5.4", + "caniuse-lite": "^1.0.30001669", + "electron-to-chromium": "^1.5.41", "node-releases": "^2.0.18", - "update-browserslist-db": "^1.1.0" + "update-browserslist-db": "^1.1.1" }, "bin": { "browserslist": "cli.js" @@ -3956,9 +3833,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001655", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001655.tgz", - "integrity": "sha512-jRGVy3iSGO5Uutn2owlb5gR6qsGngTw9ZTb4ali9f3glshcNmJ2noam4Mo9zia5P9Dk3jNNydy7vQjuE5dQmfg==", + "version": "1.0.30001684", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001684.tgz", 
+ "integrity": "sha512-G1LRwLIQjBQoyq0ZJGqGIJUXzJ8irpbjHLpVRXDvBEScFJ9b17sgK6vlx0GAJFE21okD7zXl08rRRUfq6HdoEQ==", "dev": true, "funding": [ { @@ -3973,8 +3850,7 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ], - "license": "CC-BY-4.0" + ] }, "node_modules/chai": { "version": "5.1.1", @@ -4485,11 +4361,10 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.5.13", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.13.tgz", - "integrity": "sha512-lbBcvtIJ4J6sS4tb5TLp1b4LyfCdMkwStzXPyAgVgTRAsep4bvrAGaBOP7ZJtQMNJpSQ9SqG4brWOroNaQtm7Q==", - "dev": true, - "license": "ISC" + "version": "1.5.67", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.67.tgz", + "integrity": "sha512-nz88NNBsD7kQSAGGJyp8hS6xSPtWwqNogA0mjtc2nUYeEf3nURK9qpV18TuBdDmEDgVWotS8Wkzf+V52dSQ/LQ==", + "dev": true }, "node_modules/emittery": { "version": "0.13.1", @@ -6878,16 +6753,15 @@ } }, "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz", + "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", "dev": true, - "license": "MIT", "bin": { "jsesc": "bin/jsesc" }, "engines": { - "node": ">=4" + "node": ">=6" } }, "node_modules/json-buffer": { @@ -7303,8 +7177,7 @@ "version": "2.0.18", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/normalize-path": { "version": "3.0.0", @@ -8846,16 +8719,6 @@ "license": "BSD-3-Clause", "peer": true }, - "node_modules/to-fast-properties": { - 
"version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -9019,9 +8882,9 @@ "license": "MIT" }, "node_modules/update-browserslist-db": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", - "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz", + "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==", "dev": true, "funding": [ { @@ -9037,10 +8900,9 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { - "escalade": "^3.1.2", - "picocolors": "^1.0.1" + "escalade": "^3.2.0", + "picocolors": "^1.1.0" }, "bin": { "update-browserslist-db": "cli.js" @@ -9583,8 +9445,7 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/yargs": { "version": "17.7.2", From 7afd2f191ff0da8cf2be8a659a1d19ee71d47e87 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:02:47 +0100 Subject: [PATCH 1058/1397] chore(deps): bump react-router-dom from 6.26.2 to 7.0.1 in /web/ui (#15514) Bumps [react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom) from 
6.26.2 to 7.0.1. - [Release notes](https://github.com/remix-run/react-router/releases) - [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md) - [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@7.0.1/packages/react-router-dom) --- updated-dependencies: - dependency-name: react-router-dom dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 69 ++++++++++++++++++++++------------ 2 files changed, 47 insertions(+), 24 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index b53ff1d18e..ed16d85e66 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -44,7 +44,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^6.26.2", + "react-router-dom": "^7.0.1", "sanitize-html": "^2.13.0", "uplot": "^1.6.30", "uplot-react": "^1.2.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 4d8a7bcffa..5beb8fb5a6 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -58,7 +58,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^6.26.2", + "react-router-dom": "^7.0.1", "sanitize-html": "^2.13.0", "uplot": "^1.6.30", "uplot-react": "^1.2.2", @@ -2258,14 +2258,6 @@ } } }, - "node_modules/@remix-run/router": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.19.2.tgz", - "integrity": "sha512-baiMx18+IMuD1yyvOGaHM9QrVUPGGG0jC+z+IPHnRJWUAUvaKuWKyE8gjDj2rzv3sz9zOGoRSPgeBVHRhZnBlA==", - "engines": { - "node": ">=14.0.0" - } - }, "node_modules/@rollup/plugin-node-resolve": { "version": "15.2.3", "resolved": 
"https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz", @@ -2771,6 +2763,11 @@ "@babel/types": "^7.20.7" } }, + "node_modules/@types/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==" + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", @@ -4033,6 +4030,14 @@ "dev": true, "license": "MIT" }, + "node_modules/cookie": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz", + "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==", + "engines": { + "node": ">=18" + } + }, "node_modules/create-jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", @@ -7961,33 +7966,41 @@ } }, "node_modules/react-router": { - "version": "6.26.2", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.26.2.tgz", - "integrity": "sha512-tvN1iuT03kHgOFnLPfLJ8V95eijteveqdOSk+srqfePtQvqCExB8eHOYnlilbOcyJyKnYkr1vJvf7YqotAJu1A==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.0.1.tgz", + "integrity": "sha512-WVAhv9oWCNsja5AkK6KLpXJDSJCQizOIyOd4vvB/+eHGbYx5vkhcmcmwWjQ9yqkRClogi+xjEg9fNEOd5EX/tw==", "dependencies": { - "@remix-run/router": "1.19.2" + "@types/cookie": "^0.6.0", + "cookie": "^1.0.1", + "set-cookie-parser": "^2.6.0", + "turbo-stream": "2.4.0" }, "engines": { - "node": ">=14.0.0" + "node": ">=20.0.0" }, "peerDependencies": { - "react": ">=16.8" + "react": ">=18", + "react-dom": ">=18" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } } }, "node_modules/react-router-dom": { - "version": "6.26.2", - "resolved": 
"https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.26.2.tgz", - "integrity": "sha512-z7YkaEW0Dy35T3/QKPYB1LjMK2R1fxnHO8kWpUMTBdfVzZrWOiY9a7CtN8HqdWtDUWd5FY6Dl8HFsqVwH4uOtQ==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.0.1.tgz", + "integrity": "sha512-duBzwAAiIabhFPZfDjcYpJ+f08TMbPMETgq254GWne2NW1ZwRHhZLj7tpSp8KGb7JvZzlLcjGUnqLxpZQVEPng==", "dependencies": { - "@remix-run/router": "1.19.2", - "react-router": "6.26.2" + "react-router": "7.0.1" }, "engines": { - "node": ">=14.0.0" + "node": ">=20.0.0" }, "peerDependencies": { - "react": ">=16.8", - "react-dom": ">=16.8" + "react": ">=18", + "react-dom": ">=18" } }, "node_modules/react-style-singleton": { @@ -8319,6 +8332,11 @@ "integrity": "sha512-1chMo1dST4pFA9RDXAtF0Rbjaut4is7bzFbI1Z26IuMub68pNCILku85aYmeFhvnY//BXUPUhoRMjYcsT93J/Q==", "license": "ISC" }, + "node_modules/set-cookie-parser": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", + "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==" + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -8825,6 +8843,11 @@ "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==", "license": "0BSD" }, + "node_modules/turbo-stream": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", + "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==" + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", From 0ae424d80ae124c79923d1ffa8e717e495e93786 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 
Dec 2024 17:03:37 +0100 Subject: [PATCH 1059/1397] chore(deps): bump @mantine/code-highlight in /web/ui (#15515) Bumps [@mantine/code-highlight](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/code-highlight) from 7.13.1 to 7.14.3. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.14.3/packages/@mantine/code-highlight) --- updated-dependencies: - dependency-name: "@mantine/code-highlight" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 116 ++++++++++++++------------------- 2 files changed, 50 insertions(+), 68 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index ed16d85e66..6a0dd7dc90 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -20,7 +20,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.13.1", + "@mantine/code-highlight": "^7.14.3", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 5beb8fb5a6..c073ebe27a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -34,7 +34,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.13.1", + "@mantine/code-highlight": "^7.14.3", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", @@ -1430,13 +1430,12 @@ } }, "node_modules/@floating-ui/react": { - "version": "0.26.23", - "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.26.23.tgz", - "integrity": 
"sha512-9u3i62fV0CFF3nIegiWiRDwOs7OW/KhSUJDNx2MkQM3LbE5zQOY01sL3nelcVBXvX7Ovvo3A49I8ql+20Wg/Hw==", - "license": "MIT", + "version": "0.26.28", + "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.26.28.tgz", + "integrity": "sha512-yORQuuAtVpiRjpMhdc0wJj06b9JFjrYF4qp96j++v2NBpbi6SEGF7donUJ3TMieerQ6qVkAv1tgr7L4r5roTqw==", "dependencies": { - "@floating-ui/react-dom": "^2.1.1", - "@floating-ui/utils": "^0.2.7", + "@floating-ui/react-dom": "^2.1.2", + "@floating-ui/utils": "^0.2.8", "tabbable": "^6.0.0" }, "peerDependencies": { @@ -1445,10 +1444,9 @@ } }, "node_modules/@floating-ui/react-dom": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.1.tgz", - "integrity": "sha512-4h84MJt3CHrtG18mGsXuLCHMrug49d7DFkU0RMIyshRveBeyV2hmV/pDaF2Uxtu8kgq5r46llp5E5FQiR0K2Yg==", - "license": "MIT", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", + "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==", "dependencies": { "@floating-ui/dom": "^1.0.0" }, @@ -1458,10 +1456,9 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.7.tgz", - "integrity": "sha512-X8R8Oj771YRl/w+c1HqAC1szL8zWQRwFvgDwT129k9ACdBoud/+/rX9V0qiMl6LWUdP9voC2nDVZYPMQQsb6eA==", - "license": "MIT" + "version": "0.2.8", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz", + "integrity": "sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==" }, "node_modules/@humanwhocodes/config-array": { "version": "0.13.0", @@ -2073,36 +2070,36 @@ } }, "node_modules/@mantine/code-highlight": { - "version": "7.13.1", - "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.13.1.tgz", - "integrity": 
"sha512-7Iz6ymlTFf8hRu7OBUDOaevr2cnOPtktnDJ+9KtYibA7iZoaMxtv7CfarhfcYghDdPK9HOIQpAJkbzD5NgwjYQ==", + "version": "7.14.3", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.14.3.tgz", + "integrity": "sha512-iTdirW7R4A8qhacwNJwzBRteLX3hlVlxc8TKEfBJ4KM9Ke5wZ7hrsclzXg3vsFDQHd+oKLtWUBsehecre4VR2Q==", "dependencies": { "clsx": "^2.1.1", - "highlight.js": "^11.9.0" + "highlight.js": "^11.10.0" }, "peerDependencies": { - "@mantine/core": "7.13.1", - "@mantine/hooks": "7.13.1", - "react": "^18.2.0", - "react-dom": "^18.2.0" + "@mantine/core": "7.14.3", + "@mantine/hooks": "7.14.3", + "react": "^18.x || ^19.x", + "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/core": { - "version": "7.13.1", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.13.1.tgz", - "integrity": "sha512-KH/WrcY/5pf3FxUUbtG77xyd7kfp6SRPAJFkxjFlg9kXroiQ7baljphY371CwPYPINERShUdvCQLpz4r4WMIHA==", + "version": "7.14.3", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.14.3.tgz", + "integrity": "sha512-niAi+ZYBr4KrG+X2Mx+muvEzUOOHc/Rx0vsbIGYeNe7urwHSm/xNEGsaapmCqeRC0CSL4KI6TJOq8QhnSuQZcw==", "dependencies": { - "@floating-ui/react": "^0.26.9", + "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", - "react-number-format": "^5.3.1", - "react-remove-scroll": "^2.5.7", - "react-textarea-autosize": "8.5.3", - "type-fest": "^4.12.0" + "react-number-format": "^5.4.2", + "react-remove-scroll": "^2.6.0", + "react-textarea-autosize": "8.5.5", + "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "7.13.1", - "react": "^18.2.0", - "react-dom": "^18.2.0" + "@mantine/hooks": "7.14.3", + "react": "^18.x || ^19.x", + "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/dates": { @@ -2121,11 +2118,11 @@ } }, "node_modules/@mantine/hooks": { - "version": "7.13.1", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.13.1.tgz", - "integrity": 
"sha512-Hfd4v380pPJUKDbARk+egdAanx7bpGZmaOn8G3QBZjwDufVopxip0WPkifUKUIMeYY1CTboa+1go9l56ZWrrSg==", + "version": "7.14.3", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.14.3.tgz", + "integrity": "sha512-cU3R9b8GLs6aCvpsVC56ZOOJCUIoDqX3RcLWkcfpA5a47LjWa/rzegP4YWfNW6/E9vodPJT4AEbYXVffYlyNwA==", "peerDependencies": { - "react": "^18.2.0" + "react": "^18.x || ^19.x" } }, "node_modules/@mantine/notifications": { @@ -4238,8 +4235,7 @@ "node_modules/detect-node-es": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", - "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", - "license": "MIT" + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" }, "node_modules/diff-sequences": { "version": "29.6.3", @@ -5161,7 +5157,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", - "license": "MIT", "engines": { "node": ">=6" } @@ -5499,7 +5494,6 @@ "version": "2.2.4", "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "license": "MIT", "dependencies": { "loose-envify": "^1.0.0" } @@ -7876,10 +7870,9 @@ "peer": true }, "node_modules/react-number-format": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.1.tgz", - "integrity": "sha512-NICOjo/70dcAiwVmH6zMWoZrTQDlBrEXV/f7S0t/ewlpzp4z00pasg5G1yBX6NHLafwOF3QZ+VvK/XApwSKxdA==", - "license": "MIT", + "version": "5.4.2", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.2.tgz", + "integrity": 
"sha512-cg//jVdS49PYDgmcYoBnMMHl4XNTMuV723ZnHD2aXYtWWWqbVF3hjQ8iB+UZEuXapLbeA8P8H+1o6ZB1lcw3vg==", "peerDependencies": { "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0" @@ -7919,10 +7912,9 @@ } }, "node_modules/react-remove-scroll": { - "version": "2.5.10", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.10.tgz", - "integrity": "sha512-m3zvBRANPBw3qxVVjEIPEQinkcwlFZ4qyomuWVpNJdv4c6MvHfXV0C3L9Jx5rr3HeBHKNRX+1jreB5QloDIJjA==", - "license": "MIT", + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.0.tgz", + "integrity": "sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==", "dependencies": { "react-remove-scroll-bar": "^2.3.6", "react-style-singleton": "^2.2.1", @@ -7947,7 +7939,6 @@ "version": "2.3.6", "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz", "integrity": "sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==", - "license": "MIT", "dependencies": { "react-style-singleton": "^2.2.1", "tslib": "^2.0.0" @@ -8007,7 +7998,6 @@ "version": "2.2.1", "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", - "license": "MIT", "dependencies": { "get-nonce": "^1.0.0", "invariant": "^2.2.4", @@ -8027,10 +8017,9 @@ } }, "node_modules/react-textarea-autosize": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.3.tgz", - "integrity": "sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==", - "license": "MIT", + "version": "8.5.5", + "resolved": 
"https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.5.tgz", + "integrity": "sha512-CVA94zmfp8m4bSHtWwmANaBR8EPsKy2aZ7KwqhoS4Ftib87F9Kvi7XQhOixypPLMc6kVYgOXvKFuuzZDpHGRPg==", "dependencies": { "@babel/runtime": "^7.20.13", "use-composed-ref": "^1.3.0", @@ -8635,8 +8624,7 @@ "node_modules/tabbable": { "version": "6.2.0", "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz", - "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==", - "license": "MIT" + "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==" }, "node_modules/test-exclude": { "version": "6.0.0", @@ -8873,10 +8861,9 @@ } }, "node_modules/type-fest": { - "version": "4.26.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.26.0.tgz", - "integrity": "sha512-OduNjVJsFbifKb57UqZ2EMP1i4u64Xwow3NYXUtBbD4vIwJdQd4+xl8YDou1dlm4DVrtwT/7Ky8z8WyCULVfxw==", - "license": "(MIT OR CC0-1.0)", + "version": "4.29.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.29.0.tgz", + "integrity": "sha512-RPYt6dKyemXJe7I6oNstcH24myUGSReicxcHTvCLgzm4e0n8y05dGvcGB15/SoPRBmhlMthWQ9pvKyL81ko8nQ==", "engines": { "node": ">=16" }, @@ -8967,7 +8954,6 @@ "version": "1.3.2", "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", "integrity": "sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==", - "license": "MIT", "dependencies": { "tslib": "^2.0.0" }, @@ -8988,7 +8974,6 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", - "license": "MIT", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } @@ -8997,7 +8982,6 @@ "version": "1.1.2", "resolved": 
"https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", - "license": "MIT", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0" }, @@ -9011,7 +8995,6 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", - "license": "MIT", "dependencies": { "use-isomorphic-layout-effect": "^1.1.1" }, @@ -9051,7 +9034,6 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", - "license": "MIT", "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" From 64dd7079fa284d45b5e80ed1f66e29d5109637cd Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 10 Dec 2024 17:09:39 +0100 Subject: [PATCH 1060/1397] Add old UI to dependabot Until we have removed the code for the old UI, we should maintain the dependabot configuration for security warnings. Signed-off-by: SuperQ --- .github/dependabot.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 89b2f4d0b6..35f2b45dfc 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,8 +16,23 @@ updates: directory: "/documentation/examples/remote_storage" schedule: interval: "monthly" + # New manteen-ui packages. - package-ecosystem: "npm" directory: "/web/ui" + labels: + - dependencies + - javascript + - manteen-ui + schedule: + interval: "monthly" + open-pull-requests-limit: 20 + # Old react-app packages. 
+ - package-ecosystem: "npm" + directory: "/web/ui/react-app" + labels: + - dependencies + - javascript + - old-react-ui schedule: interval: "monthly" open-pull-requests-limit: 20 From a325ff142cd5822c3835c202e300a093d50768f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 10 Dec 2024 17:30:46 +0100 Subject: [PATCH 1061/1397] fix(test): do not run automatic WAL truncate during test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the 2 minute timeout as the default is 2 hours and wouldn't interfere. With the test. Otherwise the extra samples combined with race detection can push the test over 2 minutes and make it fail. Signed-off-by: György Krajcsovits --- tsdb/agent/db_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 0840cebe5c..0238a8e140 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -488,7 +488,6 @@ func TestPartialTruncateWAL(t *testing.T) { ) opts := DefaultOptions() - opts.TruncateFrequency = time.Minute * 2 reg := prometheus.NewRegistry() s := createTestAgentDB(t, reg, opts) From 6daf75d8ce1d8aa868afdc808fef8a95525b514a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:58:39 +0100 Subject: [PATCH 1062/1397] chore(deps): bump @types/lodash from 4.17.9 to 4.17.13 in /web/ui (#15265) Bumps [@types/lodash](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/lodash) from 4.17.9 to 4.17.13. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/lodash) --- updated-dependencies: - dependency-name: "@types/lodash" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 6a0dd7dc90..545e35c27e 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -34,7 +34,7 @@ "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/lodash": "^4.17.9", + "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.3", "clsx": "^2.1.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c073ebe27a..1b853d89ca 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -48,7 +48,7 @@ "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.5.0", "@testing-library/react": "^16.0.1", - "@types/lodash": "^4.17.9", + "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.3", "clsx": "^2.1.1", @@ -2861,9 +2861,9 @@ "dev": true }, "node_modules/@types/lodash": { - "version": "4.17.9", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.9.tgz", - "integrity": "sha512-w9iWudx1XWOHW5lQRS9iKpK/XuRhnN+0T7HvdCCd802FYkT1AMTnxndJHGrNJwRoRHkslGr4S29tjm1cT7x/7w==" + "version": "4.17.13", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.13.tgz", + "integrity": "sha512-lfx+dftrEZcdBPczf9d0Qv0x+j/rfNCMuC6OcfXmO8gkfeNAY88PgKUbvG56whcN23gc27yenwF6oJZXGFpYxg==" }, "node_modules/@types/node": { "version": "22.5.4", From f0077f0fb30a7b9bad0fff9528ac7b8a1c8df6d9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:59:43 +0100 Subject: [PATCH 1063/1397] chore(deps-dev): bump @types/jest from 29.5.13 to 29.5.14 in /web/ui (#15275) 
Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.13 to 29.5.14. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest) --- updated-dependencies: - dependency-name: "@types/jest" dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/package-lock.json | 8 ++++---- web/ui/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 1b853d89ca..96c723b0b2 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -12,7 +12,7 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.13", + "@types/jest": "^29.5.14", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", @@ -2810,9 +2810,9 @@ } }, "node_modules/@types/jest": { - "version": "29.5.13", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.13.tgz", - "integrity": "sha512-wd+MVEZCHt23V0/L642O5APvspWply/rGY5BcW4SUETo2UzPU3Z26qr8jC2qxpimI2jjx9h7+2cj2FwIr01bXg==", + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", "dev": true, "dependencies": { "expect": "^29.0.0", diff --git a/web/ui/package.json b/web/ui/package.json index ef5bdb81fc..b5324df694 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -15,7 +15,7 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.13", + "@types/jest": "^29.5.14", "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", From 
ee18650636ded348348f648dc3edd90633dd0480 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 18:00:33 +0100 Subject: [PATCH 1064/1397] chore(deps-dev): bump @eslint/compat from 1.1.1 to 1.2.4 in /web/ui (#15569) Signed-off-by: Ben Kochie --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 545e35c27e..5f87a19b3b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -51,7 +51,7 @@ "use-query-params": "^2.2.1" }, "devDependencies": { - "@eslint/compat": "^1.1.1", + "@eslint/compat": "^1.2.4", "@eslint/eslintrc": "^3.1.0", "@eslint/js": "^9.16.0", "@types/react": "^18.3.5", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 96c723b0b2..9c2eac8ca3 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -65,7 +65,7 @@ "use-query-params": "^2.2.1" }, "devDependencies": { - "@eslint/compat": "^1.1.1", + "@eslint/compat": "^1.2.4", "@eslint/eslintrc": "^3.1.0", "@eslint/js": "^9.16.0", "@types/react": "^18.3.5", @@ -1312,13 +1312,20 @@ } }, "node_modules/@eslint/compat": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@eslint/compat/-/compat-1.1.1.tgz", - "integrity": "sha512-lpHyRyplhGPL5mGEh6M9O5nnKk0Gz4bFI+Zu6tKlPpDUN7XshWvH9C/px4UVm87IAANE0W81CEsNGbS1KlzXpA==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@eslint/compat/-/compat-1.2.4.tgz", + "integrity": "sha512-S8ZdQj/N69YAtuqFt7653jwcvuUj131+6qGLUyDqfDg1OIoBQ66OCuXC473YQfO2AaxITTutiRQiDwoo7ZLYyg==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "peerDependencies": { + "eslint": "^9.10.0" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } } }, "node_modules/@eslint/config-array": { From 
018f4f3d3fef8972b0a14cd52a9e374e2f8538a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 18:01:02 +0100 Subject: [PATCH 1065/1397] chore(deps): bump @reduxjs/toolkit from 2.2.7 to 2.4.0 in /web/ui (#15513) Bumps [@reduxjs/toolkit](https://github.com/reduxjs/redux-toolkit) from 2.2.7 to 2.4.0. - [Release notes](https://github.com/reduxjs/redux-toolkit/releases) - [Commits](https://github.com/reduxjs/redux-toolkit/compare/v2.2.7...v2.4.0) --- updated-dependencies: - dependency-name: "@reduxjs/toolkit" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 5f87a19b3b..f90f1529c7 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -29,7 +29,7 @@ "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0", - "@reduxjs/toolkit": "^2.2.1", + "@reduxjs/toolkit": "^2.4.0", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.5.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 9c2eac8ca3..631f150de2 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -43,7 +43,7 @@ "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0", - "@reduxjs/toolkit": "^2.2.1", + "@reduxjs/toolkit": "^2.4.0", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.5.0", @@ -2239,10 +2239,9 @@ "link": true }, "node_modules/@reduxjs/toolkit": { - "version": "2.2.7", - "resolved": 
"https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.2.7.tgz", - "integrity": "sha512-faI3cZbSdFb8yv9dhDTmGwclW0vk0z5o1cia+kf7gCbaCwHI5e+7tP57mJUv22pNcNbeA62GSrPpfrUfdXcQ6g==", - "license": "MIT", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.4.0.tgz", + "integrity": "sha512-wJZEuSKj14tvNfxiIiJws0tQN77/rDqucBq528ApebMIRHyWpCanJVQRxQ8WWZC19iCDKxDsGlbAir3F1layxA==", "dependencies": { "immer": "^10.0.3", "redux": "^5.0.1", From 8784e28e18d90cea790bcd0107385c8fe24b198a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 18:17:22 +0100 Subject: [PATCH 1066/1397] chore(deps): bump sanitize-html from 2.13.0 to 2.13.1 in /web/ui (#15268) Signed-off-by: Ben Kochie --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index f90f1529c7..4f08c0ce76 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -45,7 +45,7 @@ "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", "react-router-dom": "^7.0.1", - "sanitize-html": "^2.13.0", + "sanitize-html": "^2.13.1", "uplot": "^1.6.30", "uplot-react": "^1.2.2", "use-query-params": "^2.2.1" diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 631f150de2..4b9653bb65 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -59,7 +59,7 @@ "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", "react-router-dom": "^7.0.1", - "sanitize-html": "^2.13.0", + "sanitize-html": "^2.13.1", "uplot": "^1.6.30", "uplot-react": "^1.2.2", "use-query-params": "^2.2.1" @@ -8273,10 +8273,9 @@ "license": "MIT" }, "node_modules/sanitize-html": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.13.0.tgz", - "integrity": 
"sha512-Xff91Z+4Mz5QiNSLdLWwjgBDm5b1RU6xBT0+12rapjiaR7SwfRdjw8f+6Rir2MXKLrDicRFHdb51hGOAxmsUIA==", - "license": "MIT", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.13.1.tgz", + "integrity": "sha512-ZXtKq89oue4RP7abL9wp/9URJcqQNABB5GGJ2acW1sdO8JTVl92f4ygD7Yc9Ze09VAZhnt2zegeU0tbNsdcLYg==", "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", From 2c279de368842d9599c911c5cd3adb6a2554a6d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:32:36 +0100 Subject: [PATCH 1067/1397] chore(deps): bump @testing-library/jest-dom in /web/ui (#15276) Bumps [@testing-library/jest-dom](https://github.com/testing-library/jest-dom) from 6.5.0 to 6.6.3. - [Release notes](https://github.com/testing-library/jest-dom/releases) - [Changelog](https://github.com/testing-library/jest-dom/blob/main/CHANGELOG.md) - [Commits](https://github.com/testing-library/jest-dom/compare/v6.5.0...v6.6.3) --- updated-dependencies: - dependency-name: "@testing-library/jest-dom" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 4f08c0ce76..46c65febe4 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -32,7 +32,7 @@ "@reduxjs/toolkit": "^2.4.0", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", - "@testing-library/jest-dom": "^6.5.0", + "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 4b9653bb65..d6211e7ee9 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -46,7 +46,7 @@ "@reduxjs/toolkit": "^2.4.0", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", - "@testing-library/jest-dom": "^6.5.0", + "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", @@ -2649,10 +2649,9 @@ } }, "node_modules/@testing-library/jest-dom": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.5.0.tgz", - "integrity": "sha512-xGGHpBXYSHUUr6XsKBfs85TWlYKpTc37cSBBVrXcib2MkHLboWlkClhWF37JKlDb9KEq3dHs+f2xR7XJEWGBxA==", - "license": "MIT", + "version": "6.6.3", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.6.3.tgz", + "integrity": "sha512-IteBhl4XqYNkM54f4ejhLRJiZNqcSCoXUOG2CPK7qbD322KjQozM4kHQOfkG2oln9b9HTYqs+Sae8vBATubxxA==", "dependencies": { "@adobe/css-tools": "^4.4.0", "aria-query": "^5.0.0", From 6674991f9fe90ed35ee2dd8d30e72428623b6be2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:37:21 +0100 
Subject: [PATCH 1068/1397] chore(deps): bump uplot from 1.6.30 to 1.6.31 in /web/ui (#15277) Bumps [uplot](https://github.com/leeoniya/uPlot) from 1.6.30 to 1.6.31. - [Release notes](https://github.com/leeoniya/uPlot/releases) - [Commits](https://github.com/leeoniya/uPlot/compare/1.6.30...1.6.31) --- updated-dependencies: - dependency-name: uplot dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 46c65febe4..fdb9256b77 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -46,7 +46,7 @@ "react-redux": "^9.1.2", "react-router-dom": "^7.0.1", "sanitize-html": "^2.13.1", - "uplot": "^1.6.30", + "uplot": "^1.6.31", "uplot-react": "^1.2.2", "use-query-params": "^2.2.1" }, diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d6211e7ee9..0ad01a8d29 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -60,7 +60,7 @@ "react-redux": "^9.1.2", "react-router-dom": "^7.0.1", "sanitize-html": "^2.13.1", - "uplot": "^1.6.30", + "uplot": "^1.6.31", "uplot-react": "^1.2.2", "use-query-params": "^2.2.1" }, @@ -8926,10 +8926,9 @@ } }, "node_modules/uplot": { - "version": "1.6.30", - "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.30.tgz", - "integrity": "sha512-48oVVRALM/128ttW19F2a2xobc2WfGdJ0VJFX00099CfqbCTuML7L2OrTKxNzeFP34eo1+yJbqFSoFAp2u28/Q==", - "license": "MIT" + "version": "1.6.31", + "resolved": "https://registry.npmjs.org/uplot/-/uplot-1.6.31.tgz", + "integrity": "sha512-sQZqSwVCbJGnFB4IQjQYopzj5CoTZJ4Br1fG/xdONimqgHmsacvCjNesdGDypNKFbrhLGIeshYhy89FxPF+H+w==" }, "node_modules/uplot-react": { "version": "1.2.2", From 
1055502b9c1d8ce769ebca36c7e2fda0146de453 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:37:42 +0100 Subject: [PATCH 1069/1397] chore(deps): bump @lezer/common from 1.2.1 to 1.2.3 in /web/ui (#15278) Bumps [@lezer/common](https://github.com/lezer-parser/common) from 1.2.1 to 1.2.3. - [Changelog](https://github.com/lezer-parser/common/blob/main/CHANGELOG.md) - [Commits](https://github.com/lezer-parser/common/compare/1.2.1...1.2.3) --- updated-dependencies: - dependency-name: "@lezer/common" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index fdb9256b77..2c3fa5a2ad 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -18,7 +18,7 @@ "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", - "@lezer/common": "^1.2.1", + "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.14.3", "@mantine/core": "^7.11.2", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 4ae6eb50b4..e8c4534069 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -38,7 +38,7 @@ "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.34.1", - "@lezer/common": "^1.2.1", + "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", "eslint-plugin-prettier": "^5.1.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 0ad01a8d29..15193f7036 
100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -32,7 +32,7 @@ "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", - "@lezer/common": "^1.2.1", + "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.14.3", "@mantine/core": "^7.11.2", @@ -170,7 +170,7 @@ "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.34.1", - "@lezer/common": "^1.2.1", + "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", "eslint-plugin-prettier": "^5.1.3", @@ -2039,10 +2039,9 @@ } }, "node_modules/@lezer/common": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", - "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==", - "license": "MIT" + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz", + "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==" }, "node_modules/@lezer/generator": { "version": "1.7.1", From faf398e38059083c22c1bc7b8a725b9274148e17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:37:59 +0100 Subject: [PATCH 1070/1397] chore(deps): bump @uiw/react-codemirror from 4.23.3 to 4.23.6 in /web/ui (#15281) Bumps [@uiw/react-codemirror](https://github.com/uiwjs/react-codemirror) from 4.23.3 to 4.23.6. - [Release notes](https://github.com/uiwjs/react-codemirror/releases) - [Commits](https://github.com/uiwjs/react-codemirror/compare/v4.23.3...v4.23.6) --- updated-dependencies: - dependency-name: "@uiw/react-codemirror" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 2c3fa5a2ad..97da095cf1 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -36,7 +36,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.3", + "@uiw/react-codemirror": "^4.23.6", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 15193f7036..978b285737 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -50,7 +50,7 @@ "@testing-library/react": "^16.0.1", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.3", + "@uiw/react-codemirror": "^4.23.6", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", @@ -3194,9 +3194,9 @@ } }, "node_modules/@uiw/codemirror-extensions-basic-setup": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.3.tgz", - "integrity": "sha512-nEMjgbCyeLx+UQgOGAAoUWYFE34z5TlyaKNszuig/BddYFDb0WKcgmC37bDFxR2dZssf3K/lwGWLpXnGKXePbA==", + "version": "4.23.6", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.6.tgz", + "integrity": "sha512-bvtq8IOvdkLJMhoJBRGPEzU51fMpPDwEhcAHp9xCR05MtbIokQgsnLXrmD1aZm6e7s/3q47H+qdSfAAkR5MkLA==", "dependencies": { "@codemirror/autocomplete": "^6.0.0", "@codemirror/commands": "^6.0.0", @@ -3220,15 +3220,15 @@ } }, "node_modules/@uiw/react-codemirror": { - "version": "4.23.3", - "resolved": 
"https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.3.tgz", - "integrity": "sha512-TBBLUbeqXmfQSfO+f3rPNOAb+QXbSm7KPB64FHQWLGg2WJNbpOhjLOWMyL+C4ZP3aSCNc2Y5aftEK1vp3wCKTA==", + "version": "4.23.6", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.6.tgz", + "integrity": "sha512-caYKGV6TfGLRV1HHD3p0G3FiVzKL1go7wes5XT2nWjB0+dTdyzyb81MKRSacptgZcotujfNO6QXn65uhETRAMw==", "dependencies": { "@babel/runtime": "^7.18.6", "@codemirror/commands": "^6.1.0", "@codemirror/state": "^6.1.1", "@codemirror/theme-one-dark": "^6.0.0", - "@uiw/codemirror-extensions-basic-setup": "4.23.3", + "@uiw/codemirror-extensions-basic-setup": "4.23.6", "codemirror": "^6.0.0" }, "funding": { From 431fb1aea8b440ccb9c20b8237a07d034e3a1453 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:41:01 +0100 Subject: [PATCH 1071/1397] chore(deps): bump google.golang.org/api from 0.204.0 to 0.209.0 (#15501) Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.204.0 to 0.209.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.204.0...v0.209.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f86ade0a35..f4b824d16e 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( golang.org/x/sys v0.27.0 golang.org/x/text v0.20.0 golang.org/x/tools v0.27.0 - google.golang.org/api v0.204.0 + google.golang.org/api v0.209.0 google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.2 @@ -93,7 +93,7 @@ require ( ) require ( - cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth v0.10.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -138,7 +138,7 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect - github.com/googleapis/gax-go/v2 v2.13.0 // indirect + github.com/googleapis/gax-go/v2 v2.14.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect @@ -192,8 +192,8 @@ require ( golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.31.0 // indirect golang.org/x/term v0.26.0 // indirect - golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index ff0d5e691d..190edd9e11 100644 --- 
a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= -cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth v0.10.2 h1:oKF7rgBfSHdp/kuhXtqU/tNDr0mZqhYbEh+6SiqzkKo= +cloud.google.com/go/auth v0.10.2/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= @@ -225,8 +225,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -626,8 +626,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -644,8 +644,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= -google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= +google.golang.org/api v0.209.0 h1:Ja2OXNlyRlWCWu8o+GgI4yUn/wz9h/5ZfFbKz+dQX+w= +google.golang.org/api v0.209.0/go.mod h1:I53S168Yr/PNDNMi5yPnDc0/LGRZO6o7PoEbl/HY3CM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -654,8 +654,8 @@ google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f h1:C1QccEa9kUwvMgEUORqQD9S17QesQijxjZ84sO82mfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From d1c251ff775b36ac7e84360659cd9ed82a314882 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 00:41:49 +0100 Subject: [PATCH 1072/1397] chore(deps): bump github.com/ionos-cloud/sdk-go/v6 from 6.2.1 to 6.3.0 (#15497) Bumps [github.com/ionos-cloud/sdk-go/v6](https://github.com/ionos-cloud/sdk-go) from 6.2.1 to 6.3.0. - [Release notes](https://github.com/ionos-cloud/sdk-go/releases) - [Changelog](https://github.com/ionos-cloud/sdk-go/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/ionos-cloud/sdk-go/compare/v6.2.1...v6.3.0) --- updated-dependencies: - dependency-name: github.com/ionos-cloud/sdk-go/v6 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f4b824d16e..0941c32995 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/hashicorp/consul/api v1.30.0 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 github.com/hetznercloud/hcloud-go/v2 v2.17.0 - github.com/ionos-cloud/sdk-go/v6 v6.2.1 + github.com/ionos-cloud/sdk-go/v6 v6.3.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b diff --git a/go.sum b/go.sum index 190edd9e11..50f5f0fd19 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.17.0 h1:ge0w2piey9SV6XGyU/wQ6HBR24QyMbJ3 github.com/hetznercloud/hcloud-go/v2 v2.17.0/go.mod h1:zfyZ4Orx+mPpYDzWAxXR7DHGL50nnlZ5Edzgs1o6f/s= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= -github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= +github.com/ionos-cloud/sdk-go/v6 v6.3.0 h1:/lTieTH9Mo/CWm3cTlFLnK10jgxjUGkAqRffGqvPteY= +github.com/ionos-cloud/sdk-go/v6 v6.3.0/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= From 944c804eccdad1c8958797aa0175c9f17e42323b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 01:32:31 +0100 Subject: [PATCH 1073/1397] 
chore(deps): bump google.golang.org/grpc from 1.67.1 to 1.68.1 (#15576) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.67.1 to 1.68.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.67.1...v1.68.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 0941c32995..5e94985788 100644 --- a/go.mod +++ b/go.mod @@ -81,7 +81,7 @@ require ( golang.org/x/tools v0.27.0 google.golang.org/api v0.209.0 google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 - google.golang.org/grpc v1.67.1 + google.golang.org/grpc v1.68.1 google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -104,7 +104,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cilium/ebpf v0.11.0 // indirect - github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect diff --git a/go.sum b/go.sum index 50f5f0fd19..dc6e4f0427 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -662,8 +662,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From e1bfdd22571d874e1f9154d1a35ce3a8569ded36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 01:33:42 +0100 Subject: [PATCH 
1074/1397] chore(deps-dev): bump eslint-plugin-react-hooks in /web/ui (#15568) Bumps [eslint-plugin-react-hooks](https://github.com/facebook/react/tree/HEAD/packages/eslint-plugin-react-hooks) from 5.1.0-rc-fb9a90fa48-20240614 to 5.1.0. - [Release notes](https://github.com/facebook/react/releases) - [Changelog](https://github.com/facebook/react/blob/main/CHANGELOG.md) - [Commits](https://github.com/facebook/react/commits/HEAD/packages/eslint-plugin-react-hooks) --- updated-dependencies: - dependency-name: eslint-plugin-react-hooks dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 97da095cf1..4a4c02668f 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -60,7 +60,7 @@ "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.11.1", - "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", + "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.13.0", "jsdom": "^25.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 978b285737..b725b7c661 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -74,7 +74,7 @@ "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.11.1", - "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", + "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.13.0", "jsdom": "^25.0.1", @@ -4578,11 +4578,10 @@ } }, "node_modules/eslint-plugin-react-hooks": { - "version": "5.1.0-rc-fb9a90fa48-20240614", - "resolved": 
"https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.1.0-rc-fb9a90fa48-20240614.tgz", - "integrity": "sha512-xsiRwaDNF5wWNC4ZHLut+x/YcAxksUd9Rizt7LaEn3bV8VyYRpXnRJQlLOfYaVy9esk4DFP4zPPnoNVjq5Gc0w==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.1.0.tgz", + "integrity": "sha512-mpJRtPgHN2tNAvZ35AMfqeB3Xqeo273QxrHJsbBEPWODRM4r0yB6jfoROqKEYrOn27UtRPpcpHc2UqyBSuUNTw==", "dev": true, - "license": "MIT", "engines": { "node": ">=10" }, From b0e7de154ee6f6de867fb79739d8406a8bea9b05 Mon Sep 17 00:00:00 2001 From: Achille Roussel Date: Tue, 10 Dec 2024 17:36:44 -0800 Subject: [PATCH 1075/1397] promqltest: allow running tests with custom storage implementation Signed-off-by: Achille Roussel --- promql/promqltest/test.go | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 60df005036..efa2136f10 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -70,7 +70,7 @@ var testStartTime = time.Unix(0, 0).UTC() // LoadedStorage returns storage with generated data using the provided load statements. // Non-load statements will cause test errors. func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage { - test, err := newTest(t, input, false) + test, err := newTest(t, input, false, newTestStorage) require.NoError(t, err) for _, cmd := range test.cmds { @@ -81,7 +81,7 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage { t.Errorf("only 'load' commands accepted, got '%s'", cmd) } } - return test.storage + return test.storage.(*teststorage.TestStorage) } // NewTestEngine creates a promql.Engine with enablePerStepStats, lookbackDelta and maxSamples, and returns it. 
@@ -112,6 +112,11 @@ func NewTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine // RunBuiltinTests runs an acceptance test suite against the provided engine. func RunBuiltinTests(t TBRun, engine promql.QueryEngine) { + RunBuiltinTestsWithStorage(t, engine, newTestStorage) +} + +// RunBuiltinTestsWithStorage runs an acceptance test suite against the provided engine and storage. +func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) { t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) parser.EnableExperimentalFunctions = true @@ -122,24 +127,29 @@ func RunBuiltinTests(t TBRun, engine promql.QueryEngine) { t.Run(fn, func(t *testing.T) { content, err := fs.ReadFile(testsFs, fn) require.NoError(t, err) - RunTest(t, string(content), engine) + RunTestWithStorage(t, string(content), engine, newStorage) }) } } // RunTest parses and runs the test against the provided engine. func RunTest(t testutil.T, input string, engine promql.QueryEngine) { - require.NoError(t, runTest(t, input, engine, false)) + RunTestWithStorage(t, input, engine, newTestStorage) +} + +// RunTestWithStorage parses and runs the test against the provided engine and storage. +func RunTestWithStorage(t testutil.T, input string, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) { + require.NoError(t, runTest(t, input, engine, newStorage, false)) } // testTest allows tests to be run in "test-the-test" mode (true for // testingMode). This is a special mode for testing test code execution itself. 
func testTest(t testutil.T, input string, engine promql.QueryEngine) error { - return runTest(t, input, engine, true) + return runTest(t, input, engine, newTestStorage, true) } -func runTest(t testutil.T, input string, engine promql.QueryEngine, testingMode bool) error { - test, err := newTest(t, input, testingMode) +func runTest(t testutil.T, input string, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage, testingMode bool) error { + test, err := newTest(t, input, testingMode, newStorage) // Why do this before checking err? newTest() can create the test storage and then return an error, // and we want to make sure to clean that up to avoid leaking goroutines. @@ -179,18 +189,20 @@ type test struct { cmds []testCommand - storage *teststorage.TestStorage + open func(testutil.T) storage.Storage + storage storage.Storage context context.Context cancelCtx context.CancelFunc } // newTest returns an initialized empty Test. -func newTest(t testutil.T, input string, testingMode bool) (*test, error) { +func newTest(t testutil.T, input string, testingMode bool, newStorage func(testutil.T) storage.Storage) (*test, error) { test := &test{ T: t, cmds: []testCommand{}, testingMode: testingMode, + open: newStorage, } err := test.parse(input) test.clear() @@ -198,6 +210,8 @@ func newTest(t testutil.T, input string, testingMode bool) (*test, error) { return test, err } +func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) } + //go:embed testdata var testsFs embed.FS @@ -1271,7 +1285,7 @@ func (t *test) clear() { if t.cancelCtx != nil { t.cancelCtx() } - t.storage = teststorage.New(t) + t.storage = t.open(t.T) t.context, t.cancelCtx = context.WithCancel(context.Background()) } From 30a4493b8720b88c4f1dd4f4bcc6e6602c2187ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 08:14:29 +0000 Subject: [PATCH 1076/1397] chore(deps): bump the go-opentelemetry-io group with 
2 updates Bumps the go-opentelemetry-io group with 2 updates: [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) and [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector). Updates `go.opentelemetry.io/collector/pdata` from 1.20.0 to 1.21.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.20.0...pdata/v1.21.0) Updates `go.opentelemetry.io/collector/semconv` from 0.114.0 to 0.115.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.114.0...v0.115.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 5e94985788..ee25b78525 100644 --- a/go.mod +++ b/go.mod @@ -60,8 +60,8 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.20.0 - go.opentelemetry.io/collector/semconv v0.114.0 + go.opentelemetry.io/collector/pdata v1.21.0 + go.opentelemetry.io/collector/semconv v0.115.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 go.opentelemetry.io/otel v1.32.0 diff --git a/go.sum b/go.sum index dc6e4f0427..94b4055fbd 100644 --- a/go.sum +++ b/go.sum @@ -503,10 +503,10 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.20.0 h1:ePcwt4bdtISP0loHaE+C9xYoU2ZkIvWv89Fob16o9SM= -go.opentelemetry.io/collector/pdata v1.20.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= -go.opentelemetry.io/collector/semconv v0.114.0 h1:/eKcCJwZepQUtEuFuxa0thx2XIOvhFpaf214ZG1a11k= -go.opentelemetry.io/collector/semconv v0.114.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= +go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= +go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= +go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= 
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 h1:7F3XCD6WYzDkwbi8I8N+oYJWquPVScnRosKGgqjsR8c= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0/go.mod h1:Dk3C0BfIlZDZ5c6eVS7TYiH2vssuyUU3vUsgbrR+5V4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= From 333547ad3fd1a5c37f073b03358b87e42d568c3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 08:15:05 +0000 Subject: [PATCH 1077/1397] chore(deps): bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.3.1+incompatible to 27.4.0+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v27.3.1...v27.4.0) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5e94985788..6ab46d1e5c 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.131.0 - github.com/docker/docker v27.3.1+incompatible + github.com/docker/docker v27.4.0+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane v0.13.1 github.com/envoyproxy/protoc-gen-validate v1.1.0 diff --git a/go.sum b/go.sum index dc6e4f0427..a5f83c2fa4 100644 --- a/go.sum +++ b/go.sum @@ -97,8 +97,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.0+incompatible h1:I9z7sQ5qyzO0BfAb9IMOawRkAGxhYsidKiTMcm0DU+A= +github.com/docker/docker v27.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= From ebfa1dd82282ae5cae4e8334185c7600da727979 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 11 Dec 2024 12:03:19 +0100 Subject: [PATCH 1078/1397] promql: Purge Holt-Winters from a doc comment `funcDoubleExponentialSmoothing` did not get its doc comment updated when we renamed it from the confusing `funcHoltWinters`. 
Signed-off-by: beorn7 --- promql/functions.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 016e676d31..da1821fd18 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -345,11 +345,14 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { return x + y } -// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data. -// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current -// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects -// how trends in historical data will affect the current data. A higher trend factor increases the influence. -// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". +// Double exponential smoothing is similar to a weighted moving average, where +// historical data has exponentially less influence on the current data. It also +// accounts for trends in data. The smoothing factor (0 < sf < 1) affects how +// historical data will affect the current data. A lower smoothing factor +// increases the influence of historical data. The trend factor (0 < tf < 1) +// affects how trends in historical data will affect the current data. A higher +// trend factor increases the influence. of trends. Algorithm taken from +// https://en.wikipedia.org/wiki/Exponential_smoothing . func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] From f44bba31b3ce436f03f217b34a722d6fe2daabd6 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Wed, 11 Dec 2024 11:01:15 +0000 Subject: [PATCH 1079/1397] config: Remove validation GetScrapeConfigs; require using config.Load. 
We should never modify (or even shallow copy) Config after config.Load; added comments and modified GetScrapeConfigs to do so. For GetScrapeConfigs the validation (even repeated) was likely doing writes (because global fields were 0). We call GetScrapeConfigs concurrently in tests and ApplyConfig causing test races. In prod there were races but likely only to replace 0 with 0, so not too severe. I removed validation since I don't see anyone using our config.Config without Load. I had to refactor one test that was doing it, all others use yaml config. Fixes #15538 Previous attempt: https://github.com/prometheus/prometheus/pull/15634 Signed-off-by: bwplotka --- cmd/prometheus/main.go | 4 +- config/config.go | 35 +++--- config/config_default_test.go | 2 + config/config_test.go | 30 ++++-- config/config_windows_test.go | 2 + scrape/manager_test.go | 195 +++++++++++++++------------- 6 files changed, 134 insertions(+), 134 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index f8a4ecd8c9..cd0775bbdd 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -593,12 +593,14 @@ func main() { logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } + // Get scrape configs to validate dynamically loaded scrape_config_files. + // They can change over time, but do the extra validation on startup for better experience. 
if _, err := cfgFile.GetScrapeConfigs(); err != nil { absPath, pathErr := filepath.Abs(cfg.configFile) if pathErr != nil { absPath = cfg.configFile } - logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) + logger.Error(fmt.Sprintf("Error loading dynamic scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } if cfg.tsdb.EnableExemplarStorage { diff --git a/config/config.go b/config/config.go index 86d8563536..551bbd5127 100644 --- a/config/config.go +++ b/config/config.go @@ -117,11 +117,12 @@ func Load(s string, logger *slog.Logger) (*Config, error) { default: return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) } - + cfg.loaded = true return cfg, nil } -// LoadFile parses the given YAML file into a Config. +// LoadFile parses and validates the given YAML file into a read-only Config. +// Callers should never write to or shallow copy the returned Config. func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { @@ -270,9 +271,12 @@ type Config struct { RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` OTLPConfig OTLPConfig `yaml:"otlp,omitempty"` + + loaded bool // Certain methods require configuration to use Load validation. } // SetDirectory joins any relative file paths with dir. +// This method writes to config, and it's not concurrency safe. func (c *Config) SetDirectory(dir string) { c.GlobalConfig.SetDirectory(dir) c.AlertingConfig.SetDirectory(dir) @@ -302,24 +306,24 @@ func (c Config) String() string { return string(b) } -// GetScrapeConfigs returns the scrape configurations. 
+// GetScrapeConfigs returns the read-only, validated scrape configurations including +// the ones from the scrape_config_files. +// This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency). +// This method also assumes the Config was created by Load, due to races, +// read mode https://github.com/prometheus/prometheus/issues/15538. func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { - scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) + if !c.loaded { + return nil, errors.New("main config scrape configs was not validated and loaded; GetScrapeConfigs method can only be used on configuration from the config.Load or config.LoadFile") + } + scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) jobNames := map[string]string{} for i, scfg := range c.ScrapeConfigs { - // We do these checks for library users that would not call validate in - // Unmarshal. - if err := scfg.Validate(c.GlobalConfig); err != nil { - return nil, err - } - - if _, ok := jobNames[scfg.JobName]; ok { - return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) - } jobNames[scfg.JobName] = "main config file" scfgs[i] = scfg } + + // Re-read and validate the dynamic scrape config rules. for _, pat := range c.ScrapeConfigFiles { fs, err := filepath.Glob(pat) if err != nil { @@ -355,6 +359,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. +// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead. func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultConfig // We want to set c to the defaults and then overwrite it with the input. @@ -391,18 +396,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } } - // Do global overrides and validate unique names. + // Do global overrides and validation. 
jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { if err := scfg.Validate(c.GlobalConfig); err != nil { return err } - if _, ok := jobNames[scfg.JobName]; ok { return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) } jobNames[scfg.JobName] = struct{}{} } + rwNames := map[string]struct{}{} for _, rwcfg := range c.RemoteWriteConfigs { if rwcfg == nil { diff --git a/config/config_default_test.go b/config/config_default_test.go index 31133f1e04..2faaf98cf9 100644 --- a/config/config_default_test.go +++ b/config/config_default_test.go @@ -18,6 +18,8 @@ package config const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml" var ruleFilesExpectedConf = &Config{ + loaded: true, + GlobalConfig: DefaultGlobalConfig, Runtime: DefaultRuntimeConfig, RuleFiles: []string{ diff --git a/config/config_test.go b/config/config_test.go index 4b5b11a9fd..0b1feea8b1 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -87,6 +87,7 @@ const ( ) var expectedConf = &Config{ + loaded: true, GlobalConfig: GlobalConfig{ ScrapeInterval: model.Duration(15 * time.Second), ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, @@ -1512,10 +1513,10 @@ func TestYAMLRoundtrip(t *testing.T) { require.NoError(t, err) out, err := yaml.Marshal(want) - require.NoError(t, err) - got := &Config{} - require.NoError(t, yaml.UnmarshalStrict(out, got)) + + got, err := Load(string(out), promslog.NewNopLogger()) + require.NoError(t, err) require.Equal(t, want, got) } @@ -1525,10 +1526,10 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { require.NoError(t, err) out, err := yaml.Marshal(want) - require.NoError(t, err) - got := &Config{} - require.NoError(t, yaml.UnmarshalStrict(out, got)) + + got, err := Load(string(out), promslog.NewNopLogger()) + require.NoError(t, err) require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit) require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) @@ -2219,6 +2220,7 @@ func 
TestEmptyConfig(t *testing.T) { c, err := Load("", promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig + exp.loaded = true require.Equal(t, exp, *c) } @@ -2268,6 +2270,7 @@ func TestEmptyGlobalBlock(t *testing.T) { require.NoError(t, err) exp := DefaultConfig exp.Runtime = DefaultRuntimeConfig + exp.loaded = true require.Equal(t, exp, *c) } @@ -2548,3 +2551,18 @@ func TestScrapeProtocolHeader(t *testing.T) { }) } } + +// Regression test against https://github.com/prometheus/prometheus/issues/15538 +func TestGetScrapeConfigs_Loaded(t *testing.T) { + t.Run("without load", func(t *testing.T) { + c := &Config{} + _, err := c.GetScrapeConfigs() + require.EqualError(t, err, "main config scrape configs was not validated and loaded; GetScrapeConfigs method can only be used on configuration from the config.Load or config.LoadFile") + }) + t.Run("with load", func(t *testing.T) { + c, err := Load("", promslog.NewNopLogger()) + require.NoError(t, err) + _, err = c.GetScrapeConfigs() + require.NoError(t, err) + }) +} diff --git a/config/config_windows_test.go b/config/config_windows_test.go index db4d46ef13..9d338b99e7 100644 --- a/config/config_windows_test.go +++ b/config/config_windows_test.go @@ -16,6 +16,8 @@ package config const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml" var ruleFilesExpectedConf = &Config{ + loaded: true, + GlobalConfig: DefaultGlobalConfig, Runtime: DefaultRuntimeConfig, RuleFiles: []string{ diff --git a/scrape/manager_test.go b/scrape/manager_test.go index f446c99789..6887ca1c43 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -38,6 +38,8 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/config" @@ -468,10 +470,8 @@ func TestPopulateLabels(t *testing.T) { func loadConfiguration(t testing.TB, c string) *config.Config { t.Helper() - 
cfg := &config.Config{} - err := yaml.UnmarshalStrict([]byte(c), cfg) - require.NoError(t, err, "Unable to load YAML config.") - + cfg, err := config.Load(c, promslog.NewNopLogger()) + require.NoError(t, err) return cfg } @@ -724,33 +724,6 @@ scrape_configs: require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) } -func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion bool) (*collectResultAppender, *Manager) { - app := &collectResultAppender{} - scrapeManager, err := NewManager( - &Options{ - EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, - skipOffsetting: true, - }, - promslog.New(&promslog.Config{}), - nil, - &collectResultAppendable{app}, - prometheus.NewRegistry(), - ) - require.NoError(t, err) - - require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ - GlobalConfig: config.GlobalConfig{ - // Disable regular scrapes. - ScrapeInterval: model.Duration(9999 * time.Minute), - ScrapeTimeout: model.Duration(5 * time.Second), - ScrapeProtocols: []config.ScrapeProtocol{config.OpenMetricsText1_0_0, config.PrometheusProto}, - }, - ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test", HonorTimestamps: honorTimestamps}}, - })) - - return app, scrapeManager -} - func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server { once := sync.Once{} @@ -789,6 +762,9 @@ func TestManagerCTZeroIngestion(t *testing.T) { t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) { for _, testCTZeroIngest := range []bool{false, true} { t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sampleTs := time.Now() ctTs := time.Time{} if testWithCT { @@ -797,10 +773,45 @@ func TestManagerCTZeroIngestion(t *testing.T) { // TODO(bwplotka): Add more types than just counter? 
encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs) - app, scrapeManager := setupScrapeManager(t, true, testCTZeroIngest) - // Perform the test. - doOneScrape(t, scrapeManager, app, setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)) + app := &collectResultAppender{} + discoveryManager, scrapeManager := runManagers(t, ctx, &Options{ + EnableCreatedTimestampZeroIngestion: testCTZeroIngest, + skipOffsetting: true, + }, &collectResultAppendable{app}) + defer scrapeManager.Stop() + + server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded) + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + testConfig := fmt.Sprintf(` +global: + # Disable regular scrapes. + scrape_interval: 9999m + scrape_timeout: 5s + +scrape_configs: +- job_name: test + honor_timestamps: true + static_configs: + - targets: ['%s'] +`, serverURL.Host) + applyConfig(t, testConfig, scrapeManager, discoveryManager) + + // Wait for one scrape. + ctx, cancel = context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + app.mtx.Lock() + defer app.mtx.Unlock() + + // Check if scrape happened and grab the relevant samples. + if len(app.resultFloats) > 0 { + return nil + } + return errors.New("expected some float samples, got none") + }), "after 1 minute") // Verify results. // Verify what we got vs expectations around CT injection. 
@@ -871,39 +882,6 @@ func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName } } -func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender, server *httptest.Server) { - t.Helper() - - serverURL, err := url.Parse(server.URL) - require.NoError(t, err) - - // Add fake target directly into tsets + reload - manager.updateTsets(map[string][]*targetgroup.Group{ - "test": {{ - Targets: []model.LabelSet{{ - model.SchemeLabel: model.LabelValue(serverURL.Scheme), - model.AddressLabel: model.LabelValue(serverURL.Host), - }}, - }}, - }) - manager.reload() - - // Wait for one scrape. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { - appender.mtx.Lock() - defer appender.mtx.Unlock() - - // Check if scrape happened and grab the relevant samples. - if len(appender.resultFloats) > 0 { - return nil - } - return errors.New("expected some float samples, got none") - }), "after 1 minute") - manager.Stop() -} - func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) { for _, f := range floats { if f.metric.Get(model.MetricNameLabel) == metricName { @@ -978,37 +956,22 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - app := &collectResultAppender{} - scrapeManager, err := NewManager( - &Options{ - EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, - EnableNativeHistogramsIngestion: true, - skipOffsetting: true, - }, - promslog.New(&promslog.Config{}), - nil, - &collectResultAppendable{app}, - prometheus.NewRegistry(), - ) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ - GlobalConfig: config.GlobalConfig{ - // Disable regular scrapes. 
- ScrapeInterval: model.Duration(9999 * time.Minute), - ScrapeTimeout: model.Duration(5 * time.Second), - // Ensure the proto is chosen. We need proto as it's the only protocol - // with the CT parsing support. - ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto}, - }, - ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}}, - })) + app := &collectResultAppender{} + discoveryManager, scrapeManager := runManagers(t, ctx, &Options{ + EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, + EnableNativeHistogramsIngestion: true, + skipOffsetting: true, + }, &collectResultAppendable{app}) + defer scrapeManager.Stop() once := sync.Once{} // Start fake HTTP target to that allow one scrape only. server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fail := true // TODO(bwplotka): Kill or use? + fail := true once.Do(func() { fail = false w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) @@ -1031,22 +994,23 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) { serverURL, err := url.Parse(server.URL) require.NoError(t, err) - // Add fake target directly into tsets + reload. Normally users would use - // Manager.Run and wait for minimum 5s refresh interval. - scrapeManager.updateTsets(map[string][]*targetgroup.Group{ - "test": {{ - Targets: []model.LabelSet{{ - model.SchemeLabel: model.LabelValue(serverURL.Scheme), - model.AddressLabel: model.LabelValue(serverURL.Host), - }}, - }}, - }) - scrapeManager.reload() + testConfig := fmt.Sprintf(` +global: + # Disable regular scrapes. + scrape_interval: 9999m + scrape_timeout: 5s + +scrape_configs: +- job_name: test + static_configs: + - targets: ['%s'] +`, serverURL.Host) + applyConfig(t, testConfig, scrapeManager, discoveryManager) var got []histogramSample // Wait for one scrape. 
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + ctx, cancel = context.WithTimeout(ctx, 1*time.Minute) defer cancel() require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { app.mtx.Lock() @@ -1064,7 +1028,6 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) { } return errors.New("expected some histogram samples, got none") }), "after 1 minute") - scrapeManager.Stop() // Check for zero samples, assuming we only injected always one histogram sample. // Did it contain CT to inject? If yes, was CT zero enabled? @@ -1118,9 +1081,17 @@ func applyConfig( require.NoError(t, discoveryManager.ApplyConfig(c)) } -func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manager) { +func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.Appendable) (*discovery.Manager, *Manager) { t.Helper() + if opts == nil { + opts = &Options{} + } + opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond) + if app == nil { + app = nopAppendable{} + } + reg := prometheus.NewRegistry() sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) require.NoError(t, err) @@ -1132,10 +1103,10 @@ func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manage discovery.Updatert(100*time.Millisecond), ) scrapeManager, err := NewManager( - &Options{DiscoveryReloadInterval: model.Duration(100 * time.Millisecond)}, + opts, nil, nil, - nopAppendable{}, + app, prometheus.NewRegistry(), ) require.NoError(t, err) @@ -1213,7 +1184,7 @@ scrape_configs: - files: ['%s'] ` - discoveryManager, scrapeManager := runManagers(t, ctx) + discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil) defer scrapeManager.Stop() applyConfig( @@ -1312,7 +1283,7 @@ scrape_configs: file_sd_configs: - files: ['%s', '%s'] ` - discoveryManager, scrapeManager := runManagers(t, ctx) + discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil) defer 
scrapeManager.Stop() applyConfig( @@ -1372,7 +1343,7 @@ scrape_configs: file_sd_configs: - files: ['%s'] ` - discoveryManager, scrapeManager := runManagers(t, ctx) + discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil) defer scrapeManager.Stop() applyConfig( @@ -1439,7 +1410,7 @@ scrape_configs: - targets: ['%s'] ` - discoveryManager, scrapeManager := runManagers(t, ctx) + discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil) defer scrapeManager.Stop() // Apply the initial config with an existing file From 9895d882b4fbb2f45864520e7daf32142d2a296a Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 11 Dec 2024 15:12:53 +0100 Subject: [PATCH 1080/1397] OTLP receiver: Use stdlib for splitting metric name in classical mode (#15573) In non-UTF-8 mode, use strings.FieldsFunc to split string into tokens, as it was before PR #15258. This makes the code more robust and readable. Signed-off-by: Arve Knudsen --- .../prometheus/normalize_name.go | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 335705aa8d..b5e07b02bd 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -122,17 +122,22 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix // Build a normalized name for the specified metric. func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { - var translationFunc func(rune) bool + var nameTokens []string + var separators []string if !allowUTF8 { nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) - translationFunc = func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) } + // Split metric name into "tokens" (of supported metric name runes). 
+ // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. + nameTokens = strings.FieldsFunc( + metric.Name(), + func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }, + ) } else { - translationFunc = func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } + translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } + // Split metric name into "tokens" (of supported metric name runes). + nameTokens, separators = fieldsFunc(metric.Name(), translationFunc) } - // Split metric name into "tokens" (of supported metric name runes). - // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. - // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - nameTokens, separators := fieldsFunc(metric.Name(), translationFunc) // Split unit at the '/' if any unitTokens := strings.SplitN(metric.Unit(), "/", 2) @@ -201,12 +206,14 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri nameTokens = append([]string{namespace}, nameTokens...) } - // Build the string from the tokens + separators. - // If UTF-8 isn't allowed, we'll use underscores as separators. + var normalizedName string if !allowUTF8 { - separators = []string{} + // Build the string from the tokens, separated with underscores + normalizedName = strings.Join(nameTokens, "_") + } else { + // Build the string from the tokens + separators. 
+ normalizedName = join(nameTokens, separators, "_") } - normalizedName := join(nameTokens, separators, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { From dd147ec1e5ffb23a3850601ad26db421cb6ec989 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:23:14 +0100 Subject: [PATCH 1081/1397] chore(deps): bump @codemirror/view in /web/ui/react-app (#15609) Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.33.0 to 6.35.3. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.33.0...6.35.3) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 30 ++++++++++++++++++++---------- web/ui/react-app/package.json | 2 +- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 01d23458dd..35c49ef88f 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -14,7 +14,7 @@ "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.29.1", + "@codemirror/view": "^6.35.3", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", @@ -2302,17 +2302,21 @@ } }, "node_modules/@codemirror/state": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", - "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" - }, - 
"node_modules/@codemirror/view": { - "version": "6.33.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.33.0.tgz", - "integrity": "sha512-AroaR3BvnjRW8fiZBalAaK+ZzB5usGgI014YKElYZvQdNH5ZIidHlO+cyf/2rWzyBFRkvG6VhiXeAEbC53P2YQ==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.0.tgz", + "integrity": "sha512-MwBHVK60IiIHDcoMet78lxt6iw5gJOGSbNbOIVBHWVXIH4/Nq1+GQgLLGgI1KlnN86WDXsPudVaqYHKBIx7Eyw==", "license": "MIT", "dependencies": { - "@codemirror/state": "^6.4.0", + "@marijn/find-cluster-break": "^1.0.0" + } + }, + "node_modules/@codemirror/view": { + "version": "6.35.3", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.35.3.tgz", + "integrity": "sha512-ScY7L8+EGdPl4QtoBiOzE4FELp7JmNUsBvgBcCakXWM2uiv/K89VAzU3BMDscf0DsACLvTKePbd5+cFDTcei6g==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.5.0", "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } @@ -4201,6 +4205,12 @@ "@lezer/common": "^1.0.0" } }, + "node_modules/@marijn/find-cluster-break": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz", + "integrity": "sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==", + "license": "MIT" + }, "node_modules/@nexucis/fuzzy": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.4.1.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 30db488ccc..5752fe9658 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -9,7 +9,7 @@ "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.29.1", + "@codemirror/view": "^6.35.3", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", From 9438966a401a54006062ebad3245306a087e1fcc Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:24:57 +0100 Subject: [PATCH 1082/1397] chore(deps-dev): bump @types/jest in /web/ui/react-app (#15631) Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.12 to 29.5.14. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest) --- updated-dependencies: - dependency-name: "@types/jest" dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 +++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 35c49ef88f..7653cccffd 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -51,7 +51,7 @@ "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.14", "@types/jquery": "^3.5.30", "@types/node": "^20.14.9", "@types/react": "^17.0.71", @@ -5027,10 +5027,11 @@ } }, "node_modules/@types/jest": { - "version": "29.5.12", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.12.tgz", - "integrity": "sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==", + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", "dev": true, + "license": "MIT", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" diff --git a/web/ui/react-app/package.json 
b/web/ui/react-app/package.json index 5752fe9658..4e5cbd06bd 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -68,7 +68,7 @@ "@testing-library/react-hooks": "^7.0.2", "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", - "@types/jest": "^29.5.12", + "@types/jest": "^29.5.14", "@types/jquery": "^3.5.30", "@types/node": "^20.14.9", "@types/react": "^17.0.71", From a976b8b135abb017bd4102328be762fd92d8114b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:26:22 +0100 Subject: [PATCH 1083/1397] chore(deps): bump @mantine/code-highlight in /web/ui (#15630) Bumps [@mantine/code-highlight](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/code-highlight) from 7.14.3 to 7.15.0. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.15.0/packages/@mantine/code-highlight) --- updated-dependencies: - dependency-name: "@mantine/code-highlight" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 29 ++++++++++++++++------------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 4a4c02668f..5b70e795b6 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -20,7 +20,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.14.3", + "@mantine/code-highlight": "^7.15.0", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b725b7c661..7516e4bc37 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -34,7 +34,7 @@ "@floating-ui/dom": "^1.6.7", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.14.3", + "@mantine/code-highlight": "^7.15.0", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.13.1", "@mantine/hooks": "^7.11.2", @@ -2076,24 +2076,26 @@ } }, "node_modules/@mantine/code-highlight": { - "version": "7.14.3", - "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.14.3.tgz", - "integrity": "sha512-iTdirW7R4A8qhacwNJwzBRteLX3hlVlxc8TKEfBJ4KM9Ke5wZ7hrsclzXg3vsFDQHd+oKLtWUBsehecre4VR2Q==", + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.15.0.tgz", + "integrity": "sha512-UJ1Qfjs7LigFQt/yuvjjWv7y6AGhfAc177tmlegs1E2OTVL8f11pM4PKGuoQxRD70Wwz+hG23ZNMumibEcRejg==", + "license": "MIT", "dependencies": { "clsx": "^2.1.1", "highlight.js": "^11.10.0" }, "peerDependencies": { - "@mantine/core": "7.14.3", - "@mantine/hooks": "7.14.3", + "@mantine/core": "7.15.0", + "@mantine/hooks": "7.15.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, 
"node_modules/@mantine/core": { - "version": "7.14.3", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.14.3.tgz", - "integrity": "sha512-niAi+ZYBr4KrG+X2Mx+muvEzUOOHc/Rx0vsbIGYeNe7urwHSm/xNEGsaapmCqeRC0CSL4KI6TJOq8QhnSuQZcw==", + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.15.0.tgz", + "integrity": "sha512-tZlRydfaEaaCNIi3tl1u+VtJxNyBoml2iCJAy6ZqcNNcjy/krJmta5lVtjUeApZfE33rkvr+3WVqGk0YjW6oSQ==", + "license": "MIT", "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", @@ -2103,7 +2105,7 @@ "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "7.14.3", + "@mantine/hooks": "7.15.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } @@ -2124,9 +2126,10 @@ } }, "node_modules/@mantine/hooks": { - "version": "7.14.3", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.14.3.tgz", - "integrity": "sha512-cU3R9b8GLs6aCvpsVC56ZOOJCUIoDqX3RcLWkcfpA5a47LjWa/rzegP4YWfNW6/E9vodPJT4AEbYXVffYlyNwA==", + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.15.0.tgz", + "integrity": "sha512-AV4ItRbVIWVDzpOPyj3ICrfQq7HEdKhO7IE7xxkdbJ4oj73DAq2jFsMoNdj3dN9u2tOn1bhPBIaP+8gKd0oAcw==", + "license": "MIT", "peerDependencies": { "react": "^18.x || ^19.x" } From 29846ddac45ccea8d8c5823bddde22e70ce0c6ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:30:44 +0100 Subject: [PATCH 1084/1397] chore(deps-dev): bump eslint from 9.11.1 to 9.16.0 in /web/ui (#15628) Bumps [eslint](https://github.com/eslint/eslint) from 9.11.1 to 9.16.0. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/compare/v9.11.1...v9.16.0) --- updated-dependencies: - dependency-name: eslint dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 216 +++++++++++++++++++++------------ 2 files changed, 140 insertions(+), 78 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 5b70e795b6..91044d82e7 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -59,7 +59,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.3.4", - "eslint": "^9.11.1", + "eslint": "^9.16.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.13.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 7516e4bc37..becfc573d1 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -73,7 +73,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.3.4", - "eslint": "^9.11.1", + "eslint": "^9.16.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.12", "globals": "^15.13.0", @@ -86,31 +86,32 @@ } }, "mantine-ui/node_modules/eslint": { - "version": "9.11.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.11.1.tgz", - "integrity": "sha512-MobhYKIoAO1s1e4VUrgx1l1Sk2JBR/Gqjjgw8+mfgoLE2xwsHur4gdfTxyTgShrhvdVFTaJSgMiQBl1jv/AWxg==", + "version": "9.16.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.16.0.tgz", + "integrity": "sha512-whp8mSQI4C8VXd+fLgSM0lh3UlmcFtVwUQjyKCFfsp+2ItAIYhlq/hqGahGqHE6cv9unM41VlqKk2VtKYR2TaA==", "dev": true, + "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.11.0", - "@eslint/config-array": "^0.18.0", - "@eslint/core": "^0.6.0", - "@eslint/eslintrc": "^3.1.0", - "@eslint/js": "9.11.1", - "@eslint/plugin-kit": "^0.2.0", + 
"@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.19.0", + "@eslint/core": "^0.9.0", + "@eslint/eslintrc": "^3.2.0", + "@eslint/js": "9.16.0", + "@eslint/plugin-kit": "^0.2.3", + "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", - "@humanwhocodes/retry": "^0.3.0", - "@nodelib/fs.walk": "^1.2.8", + "@humanwhocodes/retry": "^0.4.1", "@types/estree": "^1.0.6", "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", + "cross-spawn": "^7.0.5", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^8.0.2", - "eslint-visitor-keys": "^4.0.0", - "espree": "^10.1.0", + "eslint-scope": "^8.2.0", + "eslint-visitor-keys": "^4.2.0", + "espree": "^10.3.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -120,14 +121,11 @@ "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", "json-stable-stringify-without-jsonify": "^1.0.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", - "optionator": "^0.9.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" + "optionator": "^0.9.3" }, "bin": { "eslint": "bin/eslint.js" @@ -147,15 +145,6 @@ } } }, - "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { - "version": "9.11.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.11.1.tgz", - "integrity": "sha512-/qu+TWz8WwPWc7/HcIJKi+c+MOm46GdVaSlTTQcaqaL53+GsoA6MxWp5PtTx48qbSP7ylM1Kn7nhvkugfJvRSA==", - "dev": true, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.300.0", @@ -1302,9 +1291,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.11.0", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.11.0.tgz", - "integrity": "sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==", + "version": 
"4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", "dev": true, "license": "MIT", "engines": { @@ -1329,12 +1318,13 @@ } }, "node_modules/@eslint/config-array": { - "version": "0.18.0", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.18.0.tgz", - "integrity": "sha512-fTxvnS1sRMu3+JjXwJG0j/i4RT9u4qJ+lqS/yCGap4lH4zZGzQ7tu+xZqQmcMZq5OBZDL4QRxQzRjkWcGt8IVw==", + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.1.tgz", + "integrity": "sha512-fo6Mtm5mWyKjA/Chy1BYTdn5mGJoDNjC7C64ug20ADsRDGrA85bN3uK3MaKbeRkRuuIEAR5N33Jr1pbm411/PA==", "dev": true, + "license": "Apache-2.0", "dependencies": { - "@eslint/object-schema": "^2.1.4", + "@eslint/object-schema": "^2.1.5", "debug": "^4.3.1", "minimatch": "^3.1.2" }, @@ -1343,18 +1333,22 @@ } }, "node_modules/@eslint/core": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.6.0.tgz", - "integrity": "sha512-8I2Q8ykA4J0x0o7cg67FPVnehcqWTBehu/lmY+bolPFHGjh49YzGBMXTvpqVgEbBdvNCSxj6iFgiIyHzf03lzg==", + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.9.1.tgz", + "integrity": "sha512-GuUdqkyyzQI5RMIWkHhvTWLCyLo1jNK3vzkSyaExH5kHPDHcuL2VOpHjmMY+y3+NC69qAKToBqldTBgYeLSr9Q==", "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/eslintrc": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.1.0.tgz", - "integrity": "sha512-4Bfj15dVJdoy3RfZmmo86RK1Fwzn6SstsvK9JS+BaVKqC6QQQQyXekNaC+g+LKNgkQ+2VhGAzm6hO40AhMR3zQ==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.2.0.tgz", + "integrity": 
"sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w==", "dev": true, "license": "MIT", "dependencies": { @@ -1397,19 +1391,21 @@ } }, "node_modules/@eslint/object-schema": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.4.tgz", - "integrity": "sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==", + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.5.tgz", + "integrity": "sha512-o0bhxnL89h5Bae5T318nFoFzGy+YE5i/gGkoPAgkmTVdRKTiv3p8JHevPiPaMwoloKfEiiaHlawCqaZMqRm+XQ==", "dev": true, + "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/plugin-kit": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.0.tgz", - "integrity": "sha512-vH9PiIMMwvhCx31Af3HiGzsVNULDbyVkHXwlemn/B0TFj/00ho3y55efXrUZTfQipxoHC5u4xq6zblww1zm1Ig==", + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.4.tgz", + "integrity": "sha512-zSkKow6H5Kdm0ZUQUB2kV5JIXqoG0+uH5YADhaEHswm664N9Db8dXSi0nMJpacpMf+MyyglF1vnZohpEg5yUtg==", "dev": true, + "license": "Apache-2.0", "dependencies": { "levn": "^0.4.1" }, @@ -1467,12 +1463,51 @@ "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz", "integrity": "sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==" }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.6", + "resolved": 
"https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", + "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.3.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", + "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, "node_modules/@humanwhocodes/config-array": { "version": "0.13.0", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", "deprecated": "Use @eslint/config-array instead", "dev": true, + "license": "Apache-2.0", "peer": true, "dependencies": { "@humanwhocodes/object-schema": "^2.0.3", @@ -1503,13 +1538,15 @@ "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", "deprecated": "Use @eslint/object-schema instead", "dev": true, + "license": "BSD-3-Clause", "peer": true }, "node_modules/@humanwhocodes/retry": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.0.tgz", - "integrity": "sha512-d2CGZR2o7fS6sWB7DG/3a95bGKQyHMACZ5aW8qGkkqQpUoZV6C0X7Pc7l4ZNMZkfNBf4VWNe9E1jRsf0G146Ew==", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.1.tgz", + "integrity": "sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA==", "dev": true, + "license": 
"Apache-2.0", "engines": { "node": ">=18.18" }, @@ -3248,10 +3285,11 @@ } }, "node_modules/@ungap/structured-clone": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", - "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.1.tgz", + "integrity": "sha512-fEzPV3hSkSMltkw152tJKNARhOupqbH96MZWyRjNaYZOMIzbrTeQDG+MTc6Mr2pgzFQzFxAfmhGDNP5QK++2ZA==", "dev": true, + "license": "ISC", "peer": true }, "node_modules/@vitejs/plugin-react": { @@ -3381,9 +3419,9 @@ } }, "node_modules/acorn": { - "version": "8.12.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", - "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "dev": true, "license": "MIT", "bin": { @@ -3469,6 +3507,7 @@ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -4072,9 +4111,9 @@ "license": "MIT" }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "license": "MIT", "dependencies": 
{ @@ -4272,6 +4311,7 @@ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, + "license": "Apache-2.0", "peer": true, "dependencies": { "esutils": "^2.0.2" @@ -4484,7 +4524,9 @@ "version": "8.57.1", "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, + "license": "MIT", "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", @@ -4602,10 +4644,11 @@ } }, "node_modules/eslint-scope": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.1.0.tgz", - "integrity": "sha512-14dSvlhaVhKKsa9Fx1l8A17s7ah7Ef7wCakJ10LYk6+GYmP9yDti2oq2SEwcyndt6knfcZyhyxwY3i9yL78EQw==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz", + "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==", "dev": true, + "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -4618,9 +4661,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.0.0.tgz", - "integrity": "sha512-OtIRv/2GyiF6o/d8K7MYKKbXrOUBIK6SfkIRM4Z0dY3w+LiQ0vy3F57m0Z71bjbyeiWFiHJ8brqnmE6H6/jEuw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", + "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", "dev": true, "license": "Apache-2.0", "engines": { @@ -4635,6 +4678,7 @@ "resolved": 
"https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, + "license": "MIT", "peer": true, "dependencies": { "ajv": "^6.12.4", @@ -4659,6 +4703,7 @@ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", "dev": true, + "license": "MIT", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4669,6 +4714,7 @@ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, + "license": "BSD-2-Clause", "peer": true, "dependencies": { "esrecurse": "^4.3.0", @@ -4686,6 +4732,7 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, + "license": "Apache-2.0", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4699,6 +4746,7 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, + "license": "BSD-2-Clause", "peer": true, "dependencies": { "acorn": "^8.9.0", @@ -4717,6 +4765,7 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dev": true, + "license": "MIT", "peer": true, "dependencies": { "flat-cache": "^3.0.4" @@ -4730,6 +4779,7 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", "integrity": 
"sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, + "license": "MIT", "peer": true, "dependencies": { "flatted": "^3.2.9", @@ -4745,6 +4795,7 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, + "license": "MIT", "peer": true, "dependencies": { "type-fest": "^0.20.2" @@ -4761,6 +4812,7 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, + "license": "(MIT OR CC0-1.0)", "peer": true, "engines": { "node": ">=10" @@ -4770,15 +4822,15 @@ } }, "node_modules/espree": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-10.1.0.tgz", - "integrity": "sha512-M1M6CpiE6ffoigIOWYO9UDP8TMUw9kqb21tf+08IgDYjCsOvCuDt4jQcZmoYxx+w7zlKw9/N0KXfto+I8/FrXA==", + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz", + "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "acorn": "^8.12.0", + "acorn": "^8.14.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^4.0.0" + "eslint-visitor-keys": "^4.2.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -4820,6 +4872,7 @@ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, + "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -4992,6 +5045,7 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", "integrity": 
"sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, + "license": "MIT", "dependencies": { "flat-cache": "^4.0.0" }, @@ -5067,6 +5121,7 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, + "license": "MIT", "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -5076,10 +5131,11 @@ } }, "node_modules/flatted": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", - "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", - "dev": true + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.2.tgz", + "integrity": "sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==", + "dev": true, + "license": "ISC" }, "node_modules/form-data": { "version": "4.0.0", @@ -5612,6 +5668,7 @@ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -6773,7 +6830,8 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", @@ -6822,6 +6880,7 @@ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, + "license": "MIT", "dependencies": { "json-buffer": "3.0.1" } @@ -8186,6 +8245,7 @@ "integrity": 
"sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, + "license": "ISC", "peer": true, "dependencies": { "glob": "^7.1.3" @@ -8500,6 +8560,7 @@ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "ansi-regex": "^5.0.1" }, @@ -8652,7 +8713,8 @@ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/throttle-debounce": { "version": "2.3.0", From cfb54fa6a3e4349398875d4d5d48025e36489356 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:31:56 +0100 Subject: [PATCH 1085/1397] chore(deps-dev): bump @testing-library/react-hooks in /web/ui/react-app (#15599) Bumps [@testing-library/react-hooks](https://github.com/testing-library/react-hooks-testing-library) from 7.0.2 to 8.0.1. - [Release notes](https://github.com/testing-library/react-hooks-testing-library/releases) - [Changelog](https://github.com/testing-library/react-hooks-testing-library/blob/main/CHANGELOG.md) - [Commits](https://github.com/testing-library/react-hooks-testing-library/compare/v7.0.2...v8.0.1) --- updated-dependencies: - dependency-name: "@testing-library/react-hooks" dependency-type: direct:development update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 31 ++++++++++++------------------ web/ui/react-app/package.json | 2 +- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 7653cccffd..0cee58215e 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -48,7 +48,7 @@ "tempusdominus-core": "^5.19.3" }, "devDependencies": { - "@testing-library/react-hooks": "^7.0.2", + "@testing-library/react-hooks": "^8.0.1", "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", "@types/jest": "^29.5.14", @@ -4753,26 +4753,28 @@ } }, "node_modules/@testing-library/react-hooks": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/@testing-library/react-hooks/-/react-hooks-7.0.2.tgz", - "integrity": "sha512-dYxpz8u9m4q1TuzfcUApqi8iFfR6R0FaMbr2hjZJy1uC8z+bO/K4v8Gs9eogGKYQop7QsrBTFkv/BCF7MzD2Cg==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/@testing-library/react-hooks/-/react-hooks-8.0.1.tgz", + "integrity": "sha512-Aqhl2IVmLt8IovEVarNDFuJDVWVvhnr9/GCU6UUnrYXwgDFF9h2L2o2P9KBni1AST5sT6riAyoukFLyjQUgD/g==", "dev": true, + "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.5", - "@types/react": ">=16.9.0", - "@types/react-dom": ">=16.9.0", - "@types/react-test-renderer": ">=16.9.0", "react-error-boundary": "^3.1.0" }, "engines": { "node": ">=12" }, "peerDependencies": { - "react": ">=16.9.0", - "react-dom": ">=16.9.0", - "react-test-renderer": ">=16.9.0" + "@types/react": "^16.9.0 || ^17.0.0", + "react": "^16.9.0 || ^17.0.0", + "react-dom": "^16.9.0 || ^17.0.0", + "react-test-renderer": "^16.9.0 || ^17.0.0" }, "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, "react-dom": { "optional": true }, @@ -5201,15 +5203,6 @@ "@types/react-router": "*" } }, - "node_modules/@types/react-test-renderer": { - 
"version": "18.0.7", - "resolved": "https://registry.npmjs.org/@types/react-test-renderer/-/react-test-renderer-18.0.7.tgz", - "integrity": "sha512-1+ANPOWc6rB3IkSnElhjv6VLlKg2dSv/OWClUyZimbLsQyBn8Js9Vtdsi3UICJ2rIQ3k2la06dkB+C92QfhKmg==", - "dev": true, - "dependencies": { - "@types/react": "*" - } - }, "node_modules/@types/resolve": { "version": "1.17.1", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.17.1.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 4e5cbd06bd..17e14f8066 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -65,7 +65,7 @@ "not op_mini all" ], "devDependencies": { - "@testing-library/react-hooks": "^7.0.2", + "@testing-library/react-hooks": "^8.0.1", "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", "@types/jest": "^29.5.14", From 3c99111c80b6bd568ddbc550a8736947724a0379 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:32:32 +0100 Subject: [PATCH 1086/1397] chore(deps-dev): bump eslint-plugin-react-refresh in /web/ui (#15602) Bumps [eslint-plugin-react-refresh](https://github.com/ArnaudBarre/eslint-plugin-react-refresh) from 0.4.12 to 0.4.16. - [Release notes](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/releases) - [Changelog](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/blob/main/CHANGELOG.md) - [Commits](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/compare/v0.4.12...v0.4.16) --- updated-dependencies: - dependency-name: eslint-plugin-react-refresh dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 91044d82e7..1ce34598bd 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -61,7 +61,7 @@ "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.16.0", "eslint-plugin-react-hooks": "^5.1.0", - "eslint-plugin-react-refresh": "^0.4.12", + "eslint-plugin-react-refresh": "^0.4.16", "globals": "^15.13.0", "jsdom": "^25.0.1", "postcss": "^8.4.47", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index becfc573d1..c512e376d6 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -75,7 +75,7 @@ "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.16.0", "eslint-plugin-react-hooks": "^5.1.0", - "eslint-plugin-react-refresh": "^0.4.12", + "eslint-plugin-react-refresh": "^0.4.16", "globals": "^15.13.0", "jsdom": "^25.0.1", "postcss": "^8.4.47", @@ -4635,12 +4635,13 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.12", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.12.tgz", - "integrity": "sha512-9neVjoGv20FwYtCP6CB1dzR1vr57ZDNOXst21wd2xJ/cTlM2xLq0GWVlSNTdMn/4BtP6cHYBMCSp1wFBJ9jBsg==", + "version": "0.4.16", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.16.tgz", + "integrity": "sha512-slterMlxAhov/DZO8NScf6mEeMBBXodFUolijDvrtTxyezyLoTQaa73FyYus/VbTdftd8wBgBxPMRk3poleXNQ==", "dev": true, + "license": "MIT", "peerDependencies": { - "eslint": ">=7" + "eslint": ">=8.40" } }, "node_modules/eslint-scope": { From c4ed1962dda477cadc99e93e816e1af3286d3b78 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 
2024 15:33:02 +0100 Subject: [PATCH 1087/1397] chore(deps): bump @codemirror/language in /web/ui/react-app (#15621) Bumps [@codemirror/language](https://github.com/codemirror/language) from 6.10.2 to 6.10.6. - [Changelog](https://github.com/codemirror/language/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/language/compare/6.10.2...6.10.6) --- updated-dependencies: - dependency-name: "@codemirror/language" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 8 ++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 0cee58215e..52623a8f1d 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -10,7 +10,7 @@ "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", - "@codemirror/language": "^6.10.2", + "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", @@ -2267,9 +2267,9 @@ } }, "node_modules/@codemirror/language": { - "version": "6.10.2", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.2.tgz", - "integrity": "sha512-kgbTYTo0Au6dCSc/TFy7fK3fpJmgHDv1sG1KNQKJXVi+xBTEeBPY/M30YXiU6mMXeH+YIDLsbrT4ZwNRdtF+SA==", + "version": "6.10.6", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.6.tgz", + "integrity": "sha512-KrsbdCnxEztLVbB5PycWXFxas4EOyk/fPAfruSOnDDppevQgid2XZ+KbJ9u+fDikP/e7MW7HPBTvTb8JlZK9vA==", "license": "MIT", "dependencies": { "@codemirror/state": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 17e14f8066..764dd282bd 100644 --- a/web/ui/react-app/package.json +++ 
b/web/ui/react-app/package.json @@ -5,7 +5,7 @@ "dependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", - "@codemirror/language": "^6.10.2", + "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", From 4ac603218a12e6e92bfdede3f138b6753ed96b87 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:33:38 +0100 Subject: [PATCH 1088/1397] chore(deps-dev): bump sinon from 18.0.0 to 19.0.2 in /web/ui/react-app (#15584) Bumps [sinon](https://github.com/sinonjs/sinon) from 18.0.0 to 19.0.2. - [Release notes](https://github.com/sinonjs/sinon/releases) - [Changelog](https://github.com/sinonjs/sinon/blob/main/docs/changelog.md) - [Commits](https://github.com/sinonjs/sinon/compare/v18.0.0...v19.0.2) --- updated-dependencies: - dependency-name: sinon dependency-type: direct:development update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 88 ++++++++++++++++-------------- web/ui/react-app/package.json | 2 +- 2 files changed, 47 insertions(+), 43 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 52623a8f1d..63b961b2ac 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -71,7 +71,7 @@ "mutationobserver-shim": "^0.3.7", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "sinon": "^18.0.0", + "sinon": "^19.0.2", "ts-jest": "^29.2.2" }, "optionalDependencies": { @@ -4481,41 +4481,41 @@ } }, "node_modules/@sinonjs/fake-timers": { - "version": "11.2.2", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz", - "integrity": "sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw==", + "version": "13.0.5", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz", + "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", "dev": true, "license": "BSD-3-Clause", "dependencies": { - "@sinonjs/commons": "^3.0.0" + "@sinonjs/commons": "^3.0.1" } }, "node_modules/@sinonjs/samsam": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.0.tgz", - "integrity": "sha512-Bp8KUVlLp8ibJZrnvq2foVhP0IVX2CIprMJPK0vqGqgrDa0OHVKeZyBykqskkrdxV6yKBPmGasO8LVjAKR3Gew==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.2.tgz", + "integrity": "sha512-v46t/fwnhejRSFTGqbpn9u+LQ9xJDse10gNnPgAcxgdoCDMXj/G2asWAC/8Qs+BAZDicX+MNZouXT1A7c83kVw==", "dev": true, "license": "BSD-3-Clause", "dependencies": { - "@sinonjs/commons": "^2.0.0", + "@sinonjs/commons": "^3.0.1", "lodash.get": "^4.4.2", - "type-detect": "^4.0.8" + "type-detect": "^4.1.0" } }, - 
"node_modules/@sinonjs/samsam/node_modules/@sinonjs/commons": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-2.0.0.tgz", - "integrity": "sha512-uLa0j859mMrg2slwQYdO/AkrOfmH+X6LTVmNTS9CqexuE2IvVORIkSpJLqePAbEnKJ77aMmCwr1NUZ57120Xcg==", + "node_modules/@sinonjs/samsam/node_modules/type-detect": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", + "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", "dev": true, - "license": "BSD-3-Clause", - "dependencies": { - "type-detect": "4.0.8" + "license": "MIT", + "engines": { + "node": ">=4" } }, "node_modules/@sinonjs/text-encoding": { - "version": "0.7.2", - "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.2.tgz", - "integrity": "sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ==", + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz", + "integrity": "sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA==", "dev": true, "license": "(Unlicense OR Apache-2.0)" }, @@ -8358,10 +8358,11 @@ "dev": true }, "node_modules/diff": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", - "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-7.0.0.tgz", + "integrity": "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==", "dev": true, + "license": "BSD-3-Clause", "engines": { "node": ">=0.3.1" } @@ -16161,25 +16162,28 @@ "dev": true }, "node_modules/nise": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/nise/-/nise-6.0.0.tgz", - "integrity": 
"sha512-K8ePqo9BFvN31HXwEtTNGzgrPpmvgciDsFz8aztFjt4LqKO/JeFD8tBOeuDiCMXrIl/m1YvfH8auSpxfaD09wg==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/nise/-/nise-6.1.1.tgz", + "integrity": "sha512-aMSAzLVY7LyeM60gvBS423nBmIPP+Wy7St7hsb+8/fc1HmeoHJfLO8CKse4u3BtOZvQLJghYPI2i/1WZrEj5/g==", "dev": true, "license": "BSD-3-Clause", "dependencies": { - "@sinonjs/commons": "^3.0.0", - "@sinonjs/fake-timers": "^11.2.2", - "@sinonjs/text-encoding": "^0.7.2", + "@sinonjs/commons": "^3.0.1", + "@sinonjs/fake-timers": "^13.0.1", + "@sinonjs/text-encoding": "^0.7.3", "just-extend": "^6.2.0", - "path-to-regexp": "^6.2.1" + "path-to-regexp": "^8.1.0" } }, "node_modules/nise/node_modules/path-to-regexp": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.2.tgz", - "integrity": "sha512-GQX3SSMokngb36+whdpRXE+3f9V8UzyAorlYvOGx87ufGHehNTn5lCxrKtLyZ4Yl/wEKnNnr98ZzOwwDZV5ogw==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.2.0.tgz", + "integrity": "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==", "dev": true, - "license": "MIT" + "license": "MIT", + "engines": { + "node": ">=16" + } }, "node_modules/no-case": { "version": "3.0.4", @@ -21260,18 +21264,18 @@ "dev": true }, "node_modules/sinon": { - "version": "18.0.0", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-18.0.0.tgz", - "integrity": "sha512-+dXDXzD1sBO6HlmZDd7mXZCR/y5ECiEiGCBSGuFD/kZ0bDTofPYc6JaeGmPSF+1j1MejGUWkORbYOLDyvqCWpA==", + "version": "19.0.2", + "resolved": "https://registry.npmjs.org/sinon/-/sinon-19.0.2.tgz", + "integrity": "sha512-euuToqM+PjO4UgXeLETsfQiuoyPXlqFezr6YZDFwHR3t4qaX0fZUe1MfPMznTL5f8BWrVS89KduLdMUsxFCO6g==", "dev": true, "license": "BSD-3-Clause", "dependencies": { "@sinonjs/commons": "^3.0.1", - "@sinonjs/fake-timers": "^11.2.2", - "@sinonjs/samsam": "^8.0.0", - "diff": "^5.2.0", - "nise": "^6.0.0", - "supports-color": "^7" + 
"@sinonjs/fake-timers": "^13.0.2", + "@sinonjs/samsam": "^8.0.1", + "diff": "^7.0.0", + "nise": "^6.1.1", + "supports-color": "^7.2.0" }, "funding": { "type": "opencollective", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 764dd282bd..d688c7c44f 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -88,7 +88,7 @@ "mutationobserver-shim": "^0.3.7", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "sinon": "^18.0.0", + "sinon": "^19.0.2", "ts-jest": "^29.2.2" }, "jest": { From 7294aea08596064cfe1581c6cafe7e058a7c8a5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:34:15 +0100 Subject: [PATCH 1089/1397] chore(deps-dev): bump @rollup/plugin-node-resolve in /web/ui (#15270) Bumps [@rollup/plugin-node-resolve](https://github.com/rollup/plugins/tree/HEAD/packages/node-resolve) from 15.2.3 to 15.3.0. - [Changelog](https://github.com/rollup/plugins/blob/master/packages/node-resolve/CHANGELOG.md) - [Commits](https://github.com/rollup/plugins/commits/node-resolve-v15.3.0/packages/node-resolve) --- updated-dependencies: - dependency-name: "@rollup/plugin-node-resolve" dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 39 +++---------------------- 2 files changed, 5 insertions(+), 36 deletions(-) diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 6564d3fa56..5eff1a9f8b 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -34,7 +34,7 @@ "@lezer/generator": "^1.7.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", - "@rollup/plugin-node-resolve": "^15.2.3" + "@rollup/plugin-node-resolve": "^15.3.0" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c512e376d6..6a72ca39d6 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -186,7 +186,7 @@ "@lezer/generator": "^1.7.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", - "@rollup/plugin-node-resolve": "^15.2.3" + "@rollup/plugin-node-resolve": "^15.3.0" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -2301,16 +2301,14 @@ } }, "node_modules/@rollup/plugin-node-resolve": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz", - "integrity": "sha512-j/lym8nf5E21LwBT4Df1VD6hRO2L2iwUeUmP7litikRsVp1H6NWx20NEp0Y7su+7XGc476GnXXc4kFeZNGmaSQ==", + "version": "15.3.0", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.3.0.tgz", + "integrity": "sha512-9eO5McEICxMzJpDW9OnMYSv4Sta3hmt7VtBFz5zR9273suNOydOyq/FrGeGy+KsTRFm8w0SLVhzig2ILFT63Ag==", "dev": true, - "license": "MIT", "dependencies": { "@rollup/pluginutils": "^5.0.1", "@types/resolve": "1.20.2", "deepmerge": "^4.2.2", - "is-builtin-module": "^3.2.1", "is-module": "^1.0.0", "resolve": "^1.22.1" }, @@ -3819,19 +3817,6 @@ "license": "MIT", "peer": true }, - 
"node_modules/builtin-modules": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.3.0.tgz", - "integrity": "sha512-zhaCDicdLuWN5UbN5IMnFqNMhNfo919sH85y2/ea+5Yg9TsTkeZxpL+JLbp6cgYFS4sRLp3YV4S6yDuqVWHYOw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/cac": { "version": "6.7.14", "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", @@ -5569,22 +5554,6 @@ "license": "MIT", "peer": true }, - "node_modules/is-builtin-module": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/is-builtin-module/-/is-builtin-module-3.2.1.tgz", - "integrity": "sha512-BSLE3HnV2syZ0FK0iMA/yUGplUeMmNz4AW5fnTunbCIqZi4vG3WjJT9FHMy5D69xmAYBHXQhJdALdpwVxV501A==", - "dev": true, - "license": "MIT", - "dependencies": { - "builtin-modules": "^3.3.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-core-module": { "version": "2.15.1", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz", From 45d3d2e517558d189b8b1866995f8f3fc94adb68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:34:41 +0100 Subject: [PATCH 1090/1397] chore(deps-dev): bump @eslint/eslintrc from 3.1.0 to 3.2.0 in /web/ui (#15586) Bumps [@eslint/eslintrc](https://github.com/eslint/eslintrc) from 3.1.0 to 3.2.0. - [Release notes](https://github.com/eslint/eslintrc/releases) - [Changelog](https://github.com/eslint/eslintrc/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslintrc/compare/v3.1.0...v3.2.0) --- updated-dependencies: - dependency-name: "@eslint/eslintrc" dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 1ce34598bd..3e749cd9e9 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -52,7 +52,7 @@ }, "devDependencies": { "@eslint/compat": "^1.2.4", - "@eslint/eslintrc": "^3.1.0", + "@eslint/eslintrc": "^3.2.0", "@eslint/js": "^9.16.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 6a72ca39d6..843418300f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -66,7 +66,7 @@ }, "devDependencies": { "@eslint/compat": "^1.2.4", - "@eslint/eslintrc": "^3.1.0", + "@eslint/eslintrc": "^3.2.0", "@eslint/js": "^9.16.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", From 16f4adbd65127e548d2d365c57f3af7d112045ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:35:43 +0100 Subject: [PATCH 1091/1397] chore(deps-dev): bump @lezer/generator from 1.7.1 to 1.7.2 in /web/ui (#15622) Bumps [@lezer/generator](https://github.com/lezer-parser/generator) from 1.7.1 to 1.7.2. - [Changelog](https://github.com/lezer-parser/generator/blob/main/CHANGELOG.md) - [Commits](https://github.com/lezer-parser/generator/compare/1.7.1...1.7.2) --- updated-dependencies: - dependency-name: "@lezer/generator" dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 5eff1a9f8b..5f35e621cc 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -31,7 +31,7 @@ "test": "NODE_OPTIONS=--experimental-vm-modules jest" }, "devDependencies": { - "@lezer/generator": "^1.7.1", + "@lezer/generator": "^1.7.2", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", "@rollup/plugin-node-resolve": "^15.3.0" diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 843418300f..3638a3fa80 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -183,7 +183,7 @@ "version": "0.300.0", "license": "Apache-2.0", "devDependencies": { - "@lezer/generator": "^1.7.1", + "@lezer/generator": "^1.7.2", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", "@rollup/plugin-node-resolve": "^15.3.0" @@ -2081,9 +2081,9 @@ "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==" }, "node_modules/@lezer/generator": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.1.tgz", - "integrity": "sha512-MgPJN9Si+ccxzXl3OAmCeZuUKw4XiPl4y664FX/hnnyG9CTqUPq65N3/VGPA2jD23D7QgMTtNqflta+cPN+5mQ==", + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.2.tgz", + "integrity": "sha512-CwgULPOPPmH54tv4gki18bElLCdJ1+FBC+nGVSVD08vFWDsMjS7KEjNTph9JOypDnet90ujN3LzQiW3CyVODNQ==", "dev": true, "license": "MIT", "dependencies": { From 3071349d9916ca230b0b3fd5ac80130fff4eb8d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:38:50 +0100 Subject: [PATCH 
1092/1397] chore(deps-dev): bump prettier from 3.3.3 to 3.4.2 in /web/ui (#15620) Bumps [prettier](https://github.com/prettier/prettier) from 3.3.3 to 3.4.2. - [Release notes](https://github.com/prettier/prettier/releases) - [Changelog](https://github.com/prettier/prettier/blob/main/CHANGELOG.md) - [Commits](https://github.com/prettier/prettier/compare/3.3.3...3.4.2) --- updated-dependencies: - dependency-name: prettier dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/package-lock.json | 8 ++++---- web/ui/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 3638a3fa80..b4ffb1db1b 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -16,7 +16,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", - "prettier": "^3.3.3", + "prettier": "^3.4.2", "ts-jest": "^29.2.2", "typescript": "^5.6.2", "vite": "^5.4.8" @@ -7713,9 +7713,9 @@ } }, "node_modules/prettier": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", - "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", + "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", "dev": true, "license": "MIT", "bin": { diff --git a/web/ui/package.json b/web/ui/package.json index b5324df694..b79c341434 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -19,7 +19,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "eslint-config-prettier": "^9.1.0", - "prettier": "^3.3.3", + "prettier": "^3.4.2", 
"ts-jest": "^29.2.2", "typescript": "^5.6.2", "vite": "^5.4.8" From f0c9154c40f7fa95578b3b21243702ff7cd9971e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:39:23 +0100 Subject: [PATCH 1093/1397] chore(deps): bump @reduxjs/toolkit from 2.4.0 to 2.5.0 in /web/ui (#15616) Bumps [@reduxjs/toolkit](https://github.com/reduxjs/redux-toolkit) from 2.4.0 to 2.5.0. - [Release notes](https://github.com/reduxjs/redux-toolkit/releases) - [Commits](https://github.com/reduxjs/redux-toolkit/compare/v2.4.0...v2.5.0) --- updated-dependencies: - dependency-name: "@reduxjs/toolkit" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 3e749cd9e9..8a91fa4843 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -29,7 +29,7 @@ "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0", - "@reduxjs/toolkit": "^2.4.0", + "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.6.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index b4ffb1db1b..1316c24ade 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -43,7 +43,7 @@ "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0", - "@reduxjs/toolkit": "^2.4.0", + "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.6.3", @@ -2278,9 +2278,10 @@ "link": true }, "node_modules/@reduxjs/toolkit": { - 
"version": "2.4.0", - "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.4.0.tgz", - "integrity": "sha512-wJZEuSKj14tvNfxiIiJws0tQN77/rDqucBq528ApebMIRHyWpCanJVQRxQ8WWZC19iCDKxDsGlbAir3F1layxA==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.5.0.tgz", + "integrity": "sha512-awNe2oTodsZ6LmRqmkFhtb/KH03hUhxOamEQy411m3Njj3BbFvoBovxo4Q1cBWnV1ErprVj9MlF0UPXkng0eyg==", + "license": "MIT", "dependencies": { "immer": "^10.0.3", "redux": "^5.0.1", @@ -2288,7 +2289,7 @@ "reselect": "^5.1.0" }, "peerDependencies": { - "react": "^16.9.0 || ^17.0.0 || ^18", + "react": "^16.9.0 || ^17.0.0 || ^18 || ^19", "react-redux": "^7.2.1 || ^8.1.3 || ^9.0.0" }, "peerDependenciesMeta": { From e07de08950cee43d5e311d3100a0c03bd6b43c7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:40:19 +0100 Subject: [PATCH 1094/1397] chore(deps): bump @codemirror/lint from 6.8.1 to 6.8.4 in /web/ui (#15613) Bumps [@codemirror/lint](https://github.com/codemirror/lint) from 6.8.1 to 6.8.4. - [Changelog](https://github.com/codemirror/lint/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/lint/compare/6.8.1...6.8.4) --- updated-dependencies: - dependency-name: "@codemirror/lint" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 38 ++++++++++++-------- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 8a91fa4843..9889abc7b8 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -14,7 +14,7 @@ "dependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.6", - "@codemirror/lint": "^6.8.1", + "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index e8c4534069..efffccbe12 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -35,7 +35,7 @@ "devDependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.6", - "@codemirror/lint": "^6.8.1", + "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.34.1", "@lezer/common": "^1.2.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 1316c24ade..45b524441a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -28,7 +28,7 @@ "dependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.6", - "@codemirror/lint": "^6.8.1", + "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", "@floating-ui/dom": "^1.6.7", @@ -156,7 +156,7 @@ "devDependencies": { "@codemirror/autocomplete": "^6.18.1", "@codemirror/language": "^6.10.6", - "@codemirror/lint": "^6.8.1", + "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.34.1", "@lezer/common": "^1.2.3", @@ -821,13 +821,13 @@ } }, 
"node_modules/@codemirror/lint": { - "version": "6.8.1", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz", - "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==", + "version": "6.8.4", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.4.tgz", + "integrity": "sha512-u4q7PnZlJUojeRe8FJa/njJcMctISGgPQ4PnWsd9268R4ZTtU+tfFYmwkBvgcrK2+QQ8tYFVALVb5fVJykKc5A==", "license": "MIT", "dependencies": { "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", + "@codemirror/view": "^6.35.0", "crelt": "^1.0.5" } }, @@ -843,10 +843,13 @@ } }, "node_modules/@codemirror/state": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", - "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==", - "license": "MIT" + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.0.tgz", + "integrity": "sha512-MwBHVK60IiIHDcoMet78lxt6iw5gJOGSbNbOIVBHWVXIH4/Nq1+GQgLLGgI1KlnN86WDXsPudVaqYHKBIx7Eyw==", + "license": "MIT", + "dependencies": { + "@marijn/find-cluster-break": "^1.0.0" + } }, "node_modules/@codemirror/theme-one-dark": { "version": "6.1.2", @@ -861,11 +864,12 @@ } }, "node_modules/@codemirror/view": { - "version": "6.34.1", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.34.1.tgz", - "integrity": "sha512-t1zK/l9UiRqwUNPm+pdIT0qzJlzuVckbTEMVNFhfWkGiBQClstzg+78vedCvLSX0xJEZ6lwZbPpnljL7L6iwMQ==", + "version": "6.35.3", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.35.3.tgz", + "integrity": "sha512-ScY7L8+EGdPl4QtoBiOzE4FELp7JmNUsBvgBcCakXWM2uiv/K89VAzU3BMDscf0DsACLvTKePbd5+cFDTcei6g==", + "license": "MIT", "dependencies": { - "@codemirror/state": "^6.4.0", + "@codemirror/state": "^6.5.0", "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } @@ -2194,6 +2198,12 @@ "react": "^18.2.0" } }, + 
"node_modules/@marijn/find-cluster-break": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz", + "integrity": "sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==", + "license": "MIT" + }, "node_modules/@microsoft/fetch-event-source": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/@microsoft/fetch-event-source/-/fetch-event-source-2.0.1.tgz", From ab3681fdd909af7b4f9de6422e8e8819c914a45f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:40:47 +0100 Subject: [PATCH 1095/1397] chore(deps): bump lru-cache from 11.0.1 to 11.0.2 in /web/ui (#15596) Bumps [lru-cache](https://github.com/isaacs/node-lru-cache) from 11.0.1 to 11.0.2. - [Changelog](https://github.com/isaacs/node-lru-cache/blob/main/CHANGELOG.md) - [Commits](https://github.com/isaacs/node-lru-cache/compare/v11.0.1...v11.0.2) --- updated-dependencies: - dependency-name: lru-cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index efffccbe12..3aee6538a8 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -30,7 +30,7 @@ "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { "@prometheus-io/lezer-promql": "0.300.0", - "lru-cache": "^11.0.1" + "lru-cache": "^11.0.2" }, "devDependencies": { "@codemirror/autocomplete": "^6.18.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 45b524441a..db764fa53a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -151,7 +151,7 @@ "license": "Apache-2.0", "dependencies": { "@prometheus-io/lezer-promql": "0.300.0", - "lru-cache": "^11.0.1" + "lru-cache": "^11.0.2" }, "devDependencies": { "@codemirror/autocomplete": "^6.18.1", @@ -6968,9 +6968,10 @@ } }, "node_modules/lru-cache": { - "version": "11.0.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.0.1.tgz", - "integrity": "sha512-CgeuL5uom6j/ZVrg7G/+1IXqRY8JXX4Hghfy5YE0EhoYQWvndP1kufu58cmZLNIDKnRhZrXfdS9urVWx98AipQ==", + "version": "11.0.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.0.2.tgz", + "integrity": "sha512-123qHRfJBmo2jXDbo/a5YOQrJoHF/GNQTLzQ5+IdK5pWpceK17yRc6ozlWd25FxvGKQbIUs91fDFkXmDHTKcyA==", + "license": "ISC", "engines": { "node": "20 || >=22" } From 8b716e3f3458f7fa29913d1b3791fccd20dfaa98 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:41:17 +0100 Subject: [PATCH 1096/1397] chore(deps-dev): bump vitest from 2.1.1 to 2.1.8 in /web/ui 
(#15591) Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.1.1 to 2.1.8. - [Release notes](https://github.com/vitest-dev/vitest/releases) - [Commits](https://github.com/vitest-dev/vitest/commits/v2.1.8/packages/vitest) --- updated-dependencies: - dependency-name: vitest dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 216 +++++++++++++++++++-------------- 2 files changed, 123 insertions(+), 95 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 9889abc7b8..f9fc229e0b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -68,6 +68,6 @@ "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.4.8", - "vitest": "^2.1.1" + "vitest": "^2.1.8" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index db764fa53a..9c13f91667 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -82,7 +82,7 @@ "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", "vite": "^5.4.8", - "vitest": "^2.1.1" + "vitest": "^2.1.8" } }, "mantine-ui/node_modules/eslint": { @@ -3321,14 +3321,15 @@ } }, "node_modules/@vitest/expect": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.1.tgz", - "integrity": "sha512-YeueunS0HiHiQxk+KEOnq/QMzlUuOzbU1Go+PgAsHvvv3tUkJPm9xWt+6ITNTlzsMXUjmgm5T+U7KBPK2qQV6w==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.8.tgz", + "integrity": "sha512-8ytZ/fFHq2g4PJVAtDX57mayemKgDR6X3Oa2Foro+EygiOJHUXhCqBAAKQYYajZpFoIfvBCF1j6R6IYRSIUFuw==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/spy": "2.1.1", - "@vitest/utils": "2.1.1", - "chai": "^5.1.1", + "@vitest/spy": "2.1.8", + 
"@vitest/utils": "2.1.8", + "chai": "^5.1.2", "tinyrainbow": "^1.2.0" }, "funding": { @@ -3336,21 +3337,21 @@ } }, "node_modules/@vitest/mocker": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.1.tgz", - "integrity": "sha512-LNN5VwOEdJqCmJ/2XJBywB11DLlkbY0ooDJW3uRX5cZyYCrc4PI/ePX0iQhE3BiEGiQmK4GE7Q/PqCkkaiPnrA==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.8.tgz", + "integrity": "sha512-7guJ/47I6uqfttp33mgo6ga5Gr1VnL58rcqYKyShoRK9ebu8T5Rs6HN3s1NABiBeVTdWNrwUMcHH54uXZBN4zA==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/spy": "^2.1.0-beta.1", + "@vitest/spy": "2.1.8", "estree-walker": "^3.0.3", - "magic-string": "^0.30.11" + "magic-string": "^0.30.12" }, "funding": { "url": "https://opencollective.com/vitest" }, "peerDependencies": { - "@vitest/spy": "2.1.1", - "msw": "^2.3.5", + "msw": "^2.4.9", "vite": "^5.0.0" }, "peerDependenciesMeta": { @@ -3363,10 +3364,11 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.1.tgz", - "integrity": "sha512-SjxPFOtuINDUW8/UkElJYQSFtnWX7tMksSGW0vfjxMneFqxVr8YJ979QpMbDW7g+BIiq88RAGDjf7en6rvLPPQ==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.8.tgz", + "integrity": "sha512-9HiSZ9zpqNLKlbIDRWOnAWqgcA7xu+8YxXSekhr0Ykab7PAYFkhkwoqVArPOtJhPmYeE2YHgKZlj3CP36z2AJQ==", "dev": true, + "license": "MIT", "dependencies": { "tinyrainbow": "^1.2.0" }, @@ -3375,12 +3377,13 @@ } }, "node_modules/@vitest/runner": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.1.tgz", - "integrity": "sha512-uTPuY6PWOYitIkLPidaY5L3t0JJITdGTSwBtwMjKzo5O6RCOEncz9PUN+0pDidX8kTHYjO0EwUIvhlGpnGpxmA==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.8.tgz", + "integrity": 
"sha512-17ub8vQstRnRlIU5k50bG+QOMLHRhYPAna5tw8tYbj+jzjcspnwnwtPtiOlkuKC4+ixDPTuLZiqiWWQ2PSXHVg==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/utils": "2.1.1", + "@vitest/utils": "2.1.8", "pathe": "^1.1.2" }, "funding": { @@ -3388,13 +3391,14 @@ } }, "node_modules/@vitest/snapshot": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.1.tgz", - "integrity": "sha512-BnSku1WFy7r4mm96ha2FzN99AZJgpZOWrAhtQfoxjUU5YMRpq1zmHRq7a5K9/NjqonebO7iVDla+VvZS8BOWMw==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.8.tgz", + "integrity": "sha512-20T7xRFbmnkfcmgVEz+z3AU/3b0cEzZOt/zmnvZEctg64/QZbSDJEVm9fLnnlSi74KibmRsO9/Qabi+t0vCRPg==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/pretty-format": "2.1.1", - "magic-string": "^0.30.11", + "@vitest/pretty-format": "2.1.8", + "magic-string": "^0.30.12", "pathe": "^1.1.2" }, "funding": { @@ -3402,25 +3406,27 @@ } }, "node_modules/@vitest/spy": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.1.tgz", - "integrity": "sha512-ZM39BnZ9t/xZ/nF4UwRH5il0Sw93QnZXd9NAZGRpIgj0yvVwPpLd702s/Cx955rGaMlyBQkZJ2Ir7qyY48VZ+g==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.8.tgz", + "integrity": "sha512-5swjf2q95gXeYPevtW0BLk6H8+bPlMb4Vw/9Em4hFxDcaOxS+e0LOX4yqNxoHzMR2akEB2xfpnWUzkZokmgWDg==", "dev": true, + "license": "MIT", "dependencies": { - "tinyspy": "^3.0.0" + "tinyspy": "^3.0.2" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/utils": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.1.tgz", - "integrity": "sha512-Y6Q9TsI+qJ2CC0ZKj6VBb+T8UPz593N113nnUykqwANqhgf3QkZeHFlusgKLTqrnVHbj/XDKZcDHol+dxVT+rQ==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.8.tgz", + "integrity": 
"sha512-dwSoui6djdwbfFmIgbIjX2ZhIoG7Ex/+xpxyiEgIGzjliY8xGkcpITKTlp6B4MgtGkF2ilvm97cPM96XZaAgcA==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/pretty-format": "2.1.1", - "loupe": "^3.1.1", + "@vitest/pretty-format": "2.1.8", + "loupe": "^3.1.2", "tinyrainbow": "^1.2.0" }, "funding": { @@ -3582,6 +3588,7 @@ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", "dev": true, + "license": "MIT", "engines": { "node": ">=12" } @@ -3833,6 +3840,7 @@ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -3889,10 +3897,11 @@ ] }, "node_modules/chai": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.1.tgz", - "integrity": "sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.2.tgz", + "integrity": "sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==", "dev": true, + "license": "MIT", "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", @@ -3936,6 +3945,7 @@ "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", "dev": true, + "license": "MIT", "engines": { "node": ">= 16" } @@ -4179,13 +4189,13 @@ "license": "MIT" }, "node_modules/debug": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.6.tgz", - "integrity": "sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==", + "version": "4.4.0", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", "dev": true, "license": "MIT", "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -4224,6 +4234,7 @@ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } @@ -4455,6 +4466,13 @@ "is-arrayish": "^0.2.1" } }, + "node_modules/es-module-lexer": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.4.tgz", + "integrity": "sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==", + "dev": true, + "license": "MIT" + }, "node_modules/esbuild": { "version": "0.21.5", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", @@ -4892,6 +4910,7 @@ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", "dev": true, + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -4958,6 +4977,16 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/expect-type": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.1.0.tgz", + "integrity": "sha512-bFi65yM+xZgk+u/KRIpekdSYkTB5W1pEf0Lt8Q8Msh7b+eQ7LXVtIB1Bkm4fvclDEL1b2CZkMhv2mOeF8tMdkA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -5203,15 +5232,6 @@ "node": "6.* || 8.* || >= 10.*" } }, - "node_modules/get-func-name": { - "version": "2.0.2", - "resolved": 
"https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", - "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", - "dev": true, - "engines": { - "node": "*" - } - }, "node_modules/get-nonce": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", @@ -6959,13 +6979,11 @@ } }, "node_modules/loupe": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.1.tgz", - "integrity": "sha512-edNu/8D5MKVfGVFRhFf8aAxiTM6Wumfz5XsaatSxlD3w4R1d/WEKUTydCdPGbl9K7QG/Ca3GnDV2sIKIpXRQcw==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz", + "integrity": "sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg==", "dev": true, - "dependencies": { - "get-func-name": "^2.0.1" - } + "license": "MIT" }, "node_modules/lru-cache": { "version": "11.0.2", @@ -6987,10 +7005,11 @@ } }, "node_modules/magic-string": { - "version": "0.30.11", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.11.tgz", - "integrity": "sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==", + "version": "0.30.15", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.15.tgz", + "integrity": "sha512-zXeaYRgZ6ldS1RJJUrMrYgNJ4fdwnyI6tVqoiIhyCyv5IVTK9BU8Ic2l253GGETQHxI4HNUwhJ3fjDhKqEoaAw==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" } @@ -7119,9 +7138,9 @@ } }, "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", 
"dev": true, "license": "MIT" }, @@ -7459,13 +7478,15 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/pathval": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", "dev": true, + "license": "MIT", "engines": { "node": ">= 14.16" } @@ -8499,9 +8520,9 @@ "license": "MIT" }, "node_modules/std-env": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", - "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==", + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.8.0.tgz", + "integrity": "sha512-Bc3YwwCB+OzldMxOXJIIvC6cPRWr/LxOp48CdQTOkPyk/t4JWWJbrilwBd7RJzKV8QW7tJkcgAmeuLLJugl5/w==", "dev": true, "license": "MIT" }, @@ -8715,10 +8736,11 @@ "license": "MIT" }, "node_modules/tinyexec": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.0.tgz", - "integrity": "sha512-tVGE0mVJPGb0chKhqmsoosjsS+qUnJVGJpZgsHYQcGoPlG3B51R3PouqTgEGH2Dc9jjFyOqOpix6ZHNMXp1FZg==", - "dev": true + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.1.tgz", + "integrity": "sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==", + "dev": true, + "license": "MIT" }, "node_modules/tinypool": { "version": "1.0.1", @@ -8735,6 +8757,7 @@ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=14.0.0" } @@ -8744,6 
+8767,7 @@ "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=14.0.0" } @@ -9191,13 +9215,15 @@ } }, "node_modules/vite-node": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.1.tgz", - "integrity": "sha512-N/mGckI1suG/5wQI35XeR9rsMsPqKXzq1CdUndzVstBj/HvyxxGctwnK6WX43NGt5L3Z5tcRf83g4TITKJhPrA==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.8.tgz", + "integrity": "sha512-uPAwSr57kYjAUux+8E2j0q0Fxpn8M9VoyfGiRI8Kfktz9NcYMCenwY5RnZxnF1WTu3TGiYipirIzacLL3VVGFg==", "dev": true, + "license": "MIT", "dependencies": { "cac": "^6.7.14", - "debug": "^4.3.6", + "debug": "^4.3.7", + "es-module-lexer": "^1.5.4", "pathe": "^1.1.2", "vite": "^5.0.0" }, @@ -9212,29 +9238,31 @@ } }, "node_modules/vitest": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.1.tgz", - "integrity": "sha512-97We7/VC0e9X5zBVkvt7SGQMGrRtn3KtySFQG5fpaMlS+l62eeXRQO633AYhSTC3z7IMebnPPNjGXVGNRFlxBA==", + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.8.tgz", + "integrity": "sha512-1vBKTZskHw/aosXqQUlVWWlGUxSJR8YtiyZDJAFeW2kPAeX6S3Sool0mjspO+kXLuxVWlEDDowBAeqeAQefqLQ==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/expect": "2.1.1", - "@vitest/mocker": "2.1.1", - "@vitest/pretty-format": "^2.1.1", - "@vitest/runner": "2.1.1", - "@vitest/snapshot": "2.1.1", - "@vitest/spy": "2.1.1", - "@vitest/utils": "2.1.1", - "chai": "^5.1.1", - "debug": "^4.3.6", - "magic-string": "^0.30.11", + "@vitest/expect": "2.1.8", + "@vitest/mocker": "2.1.8", + "@vitest/pretty-format": "^2.1.8", + "@vitest/runner": "2.1.8", + "@vitest/snapshot": "2.1.8", + "@vitest/spy": "2.1.8", + "@vitest/utils": "2.1.8", + "chai": "^5.1.2", + "debug": "^4.3.7", + "expect-type": "^1.1.0", + 
"magic-string": "^0.30.12", "pathe": "^1.1.2", - "std-env": "^3.7.0", + "std-env": "^3.8.0", "tinybench": "^2.9.0", - "tinyexec": "^0.3.0", - "tinypool": "^1.0.0", + "tinyexec": "^0.3.1", + "tinypool": "^1.0.1", "tinyrainbow": "^1.2.0", "vite": "^5.0.0", - "vite-node": "2.1.1", + "vite-node": "2.1.8", "why-is-node-running": "^2.3.0" }, "bin": { @@ -9249,8 +9277,8 @@ "peerDependencies": { "@edge-runtime/vm": "*", "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "2.1.1", - "@vitest/ui": "2.1.1", + "@vitest/browser": "2.1.8", + "@vitest/ui": "2.1.8", "happy-dom": "*", "jsdom": "*" }, From e62a737cf4c4a660b115e889bb593a95a7dffd13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:10:27 +0000 Subject: [PATCH 1097/1397] chore(deps): bump @codemirror/search in /web/ui/react-app Bumps [@codemirror/search](https://github.com/codemirror/search) from 6.5.6 to 6.5.8. - [Changelog](https://github.com/codemirror/search/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/search/compare/6.5.6...6.5.8) --- updated-dependencies: - dependency-name: "@codemirror/search" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/react-app/package-lock.json | 9 +++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 63b961b2ac..5af8ce316f 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -12,7 +12,7 @@ "@codemirror/commands": "^6.6.0", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", - "@codemirror/search": "^6.5.6", + "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.35.3", "@forevolve/bootstrap-dark": "^4.0.2", @@ -2292,9 +2292,10 @@ } }, "node_modules/@codemirror/search": { - "version": "6.5.6", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.6.tgz", - "integrity": "sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q==", + "version": "6.5.8", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.8.tgz", + "integrity": "sha512-PoWtZvo7c1XFeZWmmyaOp2G0XVbOnm+fJzvghqGAktBW3cufwJUWvSCcNG0ppXiBEM05mZu6RhMtXPv2hpllig==", + "license": "MIT", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d688c7c44f..7be4a0c467 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -7,7 +7,7 @@ "@codemirror/commands": "^6.6.0", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", - "@codemirror/search": "^6.5.6", + "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.35.3", "@forevolve/bootstrap-dark": "^4.0.2", From a940deb1a93351fc7beda581bcc619e793cd731f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:35:25 +0100 Subject: [PATCH 1098/1397] chore(deps-dev): bump @types/jquery in /web/ui/react-app 
(#15590) Signed-off-by: Ben Kochie --- web/ui/react-app/package-lock.json | 8 ++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 5af8ce316f..76a525d114 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -52,7 +52,7 @@ "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", "@types/jest": "^29.5.14", - "@types/jquery": "^3.5.30", + "@types/jquery": "^3.5.32", "@types/node": "^20.14.9", "@types/react": "^17.0.71", "@types/react-copy-to-clipboard": "^5.0.7", @@ -5073,9 +5073,9 @@ "dev": true }, "node_modules/@types/jquery": { - "version": "3.5.30", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.30.tgz", - "integrity": "sha512-nbWKkkyb919DOUxjmRVk8vwtDb0/k8FKncmUKFi+NY+QXqWltooxTrswvz4LspQwxvLdvzBN1TImr6cw3aQx2A==", + "version": "3.5.32", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.32.tgz", + "integrity": "sha512-b9Xbf4CkMqS02YH8zACqN1xzdxc3cO735Qe5AbSUFmyOiaWAbcpqh9Wna+Uk0vgACvoQHpWDg2rGdHkYPLmCiQ==", "dev": true, "license": "MIT", "dependencies": { diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 7be4a0c467..f0f516e497 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -69,7 +69,7 @@ "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", "@types/jest": "^29.5.14", - "@types/jquery": "^3.5.30", + "@types/jquery": "^3.5.32", "@types/node": "^20.14.9", "@types/react": "^17.0.71", "@types/react-copy-to-clipboard": "^5.0.7", From c66d78cf5f520da7bc2172db53f374840a7656d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:39:33 +0100 Subject: [PATCH 1099/1397] chore(deps-dev): bump vite from 5.4.8 to 6.0.3 in /web/ui (#15611) Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.8 to 6.0.3. 
- [Release notes](https://github.com/vitejs/vite/releases) - [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) - [Commits](https://github.com/vitejs/vite/commits/v6.0.3/packages/vite) --- updated-dependencies: - dependency-name: vite dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 1375 +++++++++++++++++++++++++++----- web/ui/package.json | 2 +- 3 files changed, 1196 insertions(+), 183 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index f9fc229e0b..d204d527b3 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -67,7 +67,7 @@ "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^5.4.8", + "vite": "^6.0.3", "vitest": "^2.1.8" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 9c13f91667..c329352d00 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -19,7 +19,7 @@ "prettier": "^3.4.2", "ts-jest": "^29.2.2", "typescript": "^5.6.2", - "vite": "^5.4.8" + "vite": "^6.0.3" } }, "mantine-ui": { @@ -81,7 +81,7 @@ "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^5.4.8", + "vite": "^6.0.3", "vitest": "^2.1.8" } }, @@ -875,9 +875,9 @@ } }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", - "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.24.0.tgz", + "integrity": "sha512-WtKdFM7ls47zkKHFVzMz8opM7LkcsIp9amDUBIAWirg70RM71WRSjdILPsY5Uv1D42ZpUfaPILDlfactHgsRkw==", 
"cpu": [ "ppc64" ], @@ -888,13 +888,13 @@ "aix" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/android-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", - "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.24.0.tgz", + "integrity": "sha512-arAtTPo76fJ/ICkXWetLCc9EwEHKaeya4vMrReVlEIUCAUncH7M4bhMQ+M9Vf+FFOZJdTNMXNBrWwW+OXWpSew==", "cpu": [ "arm" ], @@ -905,13 +905,13 @@ "android" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/android-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", - "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.24.0.tgz", + "integrity": "sha512-Vsm497xFM7tTIPYK9bNTYJyF/lsP590Qc1WxJdlB6ljCbdZKU9SY8i7+Iin4kyhV/KV5J2rOKsBQbB77Ab7L/w==", "cpu": [ "arm64" ], @@ -922,13 +922,13 @@ "android" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/android-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", - "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.24.0.tgz", + "integrity": "sha512-t8GrvnFkiIY7pa7mMgJd7p8p8qqYIz1NYiAoKc75Zyv73L3DZW++oYMSHPRarcotTKuSs6m3hTOa5CKHaS02TQ==", "cpu": [ "x64" ], @@ -939,13 +939,13 @@ "android" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", - "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.24.0.tgz", + "integrity": "sha512-CKyDpRbK1hXwv79soeTJNHb5EiG6ct3efd/FTPdzOWdbZZfGhpbcqIpiD0+vwmpu0wTIL97ZRPZu8vUt46nBSw==", "cpu": [ "arm64" ], @@ -956,13 +956,13 @@ "darwin" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", - "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.24.0.tgz", + "integrity": "sha512-rgtz6flkVkh58od4PwTRqxbKH9cOjaXCMZgWD905JOzjFKW+7EiUObfd/Kav+A6Gyud6WZk9w+xu6QLytdi2OA==", "cpu": [ "x64" ], @@ -973,13 +973,13 @@ "darwin" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", - "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.24.0.tgz", + "integrity": "sha512-6Mtdq5nHggwfDNLAHkPlyLBpE5L6hwsuXZX8XNmHno9JuL2+bg2BX5tRkwjyfn6sKbxZTq68suOjgWqCicvPXA==", "cpu": [ "arm64" ], @@ -990,13 +990,13 @@ "freebsd" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", - "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + 
"version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.24.0.tgz", + "integrity": "sha512-D3H+xh3/zphoX8ck4S2RxKR6gHlHDXXzOf6f/9dbFt/NRBDIE33+cVa49Kil4WUjxMGW0ZIYBYtaGCa2+OsQwQ==", "cpu": [ "x64" ], @@ -1007,13 +1007,13 @@ "freebsd" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", - "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.24.0.tgz", + "integrity": "sha512-gJKIi2IjRo5G6Glxb8d3DzYXlxdEj2NlkixPsqePSZMhLudqPhtZ4BUrpIuTjJYXxvF9njql+vRjB2oaC9XpBw==", "cpu": [ "arm" ], @@ -1024,13 +1024,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", - "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.24.0.tgz", + "integrity": "sha512-TDijPXTOeE3eaMkRYpcy3LarIg13dS9wWHRdwYRnzlwlA370rNdZqbcp0WTyyV/k2zSxfko52+C7jU5F9Tfj1g==", "cpu": [ "arm64" ], @@ -1041,13 +1041,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", - "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.24.0.tgz", + "integrity": 
"sha512-K40ip1LAcA0byL05TbCQ4yJ4swvnbzHscRmUilrmP9Am7//0UjPreh4lpYzvThT2Quw66MhjG//20mrufm40mA==", "cpu": [ "ia32" ], @@ -1058,13 +1058,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", - "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.24.0.tgz", + "integrity": "sha512-0mswrYP/9ai+CU0BzBfPMZ8RVm3RGAN/lmOMgW4aFUSOQBjA31UP8Mr6DDhWSuMwj7jaWOT0p0WoZ6jeHhrD7g==", "cpu": [ "loong64" ], @@ -1075,13 +1075,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", - "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.24.0.tgz", + "integrity": "sha512-hIKvXm0/3w/5+RDtCJeXqMZGkI2s4oMUGj3/jM0QzhgIASWrGO5/RlzAzm5nNh/awHE0A19h/CvHQe6FaBNrRA==", "cpu": [ "mips64el" ], @@ -1092,13 +1092,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", - "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.24.0.tgz", + "integrity": "sha512-HcZh5BNq0aC52UoocJxaKORfFODWXZxtBaaZNuN3PUX3MoDsChsZqopzi5UupRhPHSEHotoiptqikjN/B77mYQ==", "cpu": [ "ppc64" ], @@ -1109,13 +1109,13 @@ "linux" ], "engines": { - 
"node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", - "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.24.0.tgz", + "integrity": "sha512-bEh7dMn/h3QxeR2KTy1DUszQjUrIHPZKyO6aN1X4BCnhfYhuQqedHaa5MxSQA/06j3GpiIlFGSsy1c7Gf9padw==", "cpu": [ "riscv64" ], @@ -1126,13 +1126,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", - "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.24.0.tgz", + "integrity": "sha512-ZcQ6+qRkw1UcZGPyrCiHHkmBaj9SiCD8Oqd556HldP+QlpUIe2Wgn3ehQGVoPOvZvtHm8HPx+bH20c9pvbkX3g==", "cpu": [ "s390x" ], @@ -1143,13 +1143,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", - "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.24.0.tgz", + "integrity": "sha512-vbutsFqQ+foy3wSSbmjBXXIJ6PL3scghJoM8zCL142cGaZKAdCZHyf+Bpu/MmX9zT9Q0zFBVKb36Ma5Fzfa8xA==", "cpu": [ "x64" ], @@ -1160,13 +1160,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", - 
"integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.24.0.tgz", + "integrity": "sha512-hjQ0R/ulkO8fCYFsG0FZoH+pWgTTDreqpqY7UnQntnaKv95uP5iW3+dChxnx7C3trQQU40S+OgWhUVwCjVFLvg==", "cpu": [ "x64" ], @@ -1177,13 +1177,30 @@ "netbsd" ], "engines": { - "node": ">=12" + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.24.0.tgz", + "integrity": "sha512-MD9uzzkPQbYehwcN583yx3Tu5M8EIoTD+tUgKF982WYL9Pf5rKy9ltgD0eUgs8pvKnmizxjXZyLt0z6DC3rRXg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", - "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.24.0.tgz", + "integrity": "sha512-4ir0aY1NGUhIC1hdoCzr1+5b43mw99uNwVzhIq1OY3QcEwPDO3B7WNXBzaKY5Nsf1+N11i1eOfFcq+D/gOS15Q==", "cpu": [ "x64" ], @@ -1194,13 +1211,13 @@ "openbsd" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", - "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.24.0.tgz", + "integrity": "sha512-jVzdzsbM5xrotH+W5f1s+JtUy1UWgjU0Cf4wMvffTB8m6wP5/kx0KiaLHlbJO+dMgtxKV8RQ/JvtlFcdZ1zCPA==", "cpu": [ "x64" ], @@ -1211,13 +1228,13 @@ "sunos" 
], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", - "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.24.0.tgz", + "integrity": "sha512-iKc8GAslzRpBytO2/aN3d2yb2z8XTVfNV0PjGlCxKo5SgWmNXx82I/Q3aG1tFfS+A2igVCY97TJ8tnYwpUWLCA==", "cpu": [ "arm64" ], @@ -1228,13 +1245,13 @@ "win32" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", - "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.24.0.tgz", + "integrity": "sha512-vQW36KZolfIudCcTnaTpmLQ24Ha1RjygBo39/aLkM2kmjkWmZGEJ5Gn9l5/7tzXA42QGIoWbICfg6KLLkIw6yw==", "cpu": [ "ia32" ], @@ -1245,13 +1262,13 @@ "win32" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/win32-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", - "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.24.0.tgz", + "integrity": "sha512-7IAFPrjSQIJrGsK6flwg7NFmwBoSTyF3rl7If0hNUFQU4ilTsEPL6GuMuU9BfIWVVGuRnuIidkSMC+c0Otu8IA==", "cpu": [ "x64" ], @@ -1262,7 +1279,7 @@ "win32" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@eslint-community/eslint-utils": { @@ -3336,33 +3353,6 @@ "url": "https://opencollective.com/vitest" } }, - "node_modules/@vitest/mocker": { - 
"version": "2.1.8", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.8.tgz", - "integrity": "sha512-7guJ/47I6uqfttp33mgo6ga5Gr1VnL58rcqYKyShoRK9ebu8T5Rs6HN3s1NABiBeVTdWNrwUMcHH54uXZBN4zA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "2.1.8", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.12" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, "node_modules/@vitest/pretty-format": { "version": "2.1.8", "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.8.tgz", @@ -4474,9 +4464,9 @@ "license": "MIT" }, "node_modules/esbuild": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", - "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.24.0.tgz", + "integrity": "sha512-FuLPevChGDshgSicjisSooU0cemp/sGXR841D5LHMB7mTVOmsEHcAxaH3irL53+8YDIeVNQEySh4DaYU/iuPqQ==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -4484,32 +4474,33 @@ "esbuild": "bin/esbuild" }, "engines": { - "node": ">=12" + "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.21.5", - "@esbuild/android-arm": "0.21.5", - "@esbuild/android-arm64": "0.21.5", - "@esbuild/android-x64": "0.21.5", - "@esbuild/darwin-arm64": "0.21.5", - "@esbuild/darwin-x64": "0.21.5", - "@esbuild/freebsd-arm64": "0.21.5", - "@esbuild/freebsd-x64": "0.21.5", - "@esbuild/linux-arm": "0.21.5", - "@esbuild/linux-arm64": "0.21.5", - "@esbuild/linux-ia32": "0.21.5", - "@esbuild/linux-loong64": "0.21.5", - "@esbuild/linux-mips64el": "0.21.5", - "@esbuild/linux-ppc64": "0.21.5", - "@esbuild/linux-riscv64": "0.21.5", - "@esbuild/linux-s390x": "0.21.5", 
- "@esbuild/linux-x64": "0.21.5", - "@esbuild/netbsd-x64": "0.21.5", - "@esbuild/openbsd-x64": "0.21.5", - "@esbuild/sunos-x64": "0.21.5", - "@esbuild/win32-arm64": "0.21.5", - "@esbuild/win32-ia32": "0.21.5", - "@esbuild/win32-x64": "0.21.5" + "@esbuild/aix-ppc64": "0.24.0", + "@esbuild/android-arm": "0.24.0", + "@esbuild/android-arm64": "0.24.0", + "@esbuild/android-x64": "0.24.0", + "@esbuild/darwin-arm64": "0.24.0", + "@esbuild/darwin-x64": "0.24.0", + "@esbuild/freebsd-arm64": "0.24.0", + "@esbuild/freebsd-x64": "0.24.0", + "@esbuild/linux-arm": "0.24.0", + "@esbuild/linux-arm64": "0.24.0", + "@esbuild/linux-ia32": "0.24.0", + "@esbuild/linux-loong64": "0.24.0", + "@esbuild/linux-mips64el": "0.24.0", + "@esbuild/linux-ppc64": "0.24.0", + "@esbuild/linux-riscv64": "0.24.0", + "@esbuild/linux-s390x": "0.24.0", + "@esbuild/linux-x64": "0.24.0", + "@esbuild/netbsd-x64": "0.24.0", + "@esbuild/openbsd-arm64": "0.24.0", + "@esbuild/openbsd-x64": "0.24.0", + "@esbuild/sunos-x64": "0.24.0", + "@esbuild/win32-arm64": "0.24.0", + "@esbuild/win32-ia32": "0.24.0", + "@esbuild/win32-x64": "0.24.0" } }, "node_modules/escalade": { @@ -7492,9 +7483,10 @@ } }, "node_modules/picocolors": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", - "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", @@ -7595,9 +7587,9 @@ } }, "node_modules/postcss": { - "version": "8.4.47", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", - "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", + "version": "8.4.49", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", + "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", "funding": [ { "type": "opencollective", @@ -7612,9 +7604,10 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { "nanoid": "^3.3.7", - "picocolors": "^1.1.0", + "picocolors": "^1.1.1", "source-map-js": "^1.2.1" }, "engines": { @@ -9156,10 +9149,536 @@ } }, "node_modules/vite": { - "version": "5.4.8", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.8.tgz", - "integrity": "sha512-FqrItQ4DT1NC4zCUqMB4c4AZORMKIa0m8/URVCZ77OZ/QSNeJ54bU1vrFADbDsuwfIPcgknRkmqakQcgnL4GiQ==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.3.tgz", + "integrity": "sha512-Cmuo5P0ENTN6HxLSo6IHsjCLn/81Vgrp81oaiFFMRa8gGDj5xEjIcEpf2ZymZtZR8oU0P2JX5WuUp/rlXcHkAw==", "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.24.0", + "postcss": "^8.4.49", + "rollup": "^4.23.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + 
"node_modules/vite-node": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.8.tgz", + "integrity": "sha512-uPAwSr57kYjAUux+8E2j0q0Fxpn8M9VoyfGiRI8Kfktz9NcYMCenwY5RnZxnF1WTu3TGiYipirIzacLL3VVGFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.7", + "es-module-lexer": "^1.5.4", + "pathe": "^1.1.2", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vite-node/node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vite-node/node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + 
"@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/vite-node/node_modules/vite": { + "version": "5.4.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.11.tgz", + "integrity": "sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q==", + "dev": true, + "license": "MIT", "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -9214,29 +9733,6 @@ } } }, - "node_modules/vite-node": { - "version": "2.1.8", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.8.tgz", - "integrity": "sha512-uPAwSr57kYjAUux+8E2j0q0Fxpn8M9VoyfGiRI8Kfktz9NcYMCenwY5RnZxnF1WTu3TGiYipirIzacLL3VVGFg==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.3.7", - "es-module-lexer": "^1.5.4", - "pathe": "^1.1.2", - "vite": "^5.0.0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || >=20.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, "node_modules/vitest": { "version": "2.1.8", "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.8.tgz", @@ -9303,6 +9799,523 @@ } } }, + "node_modules/vitest/node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + 
} + }, + "node_modules/vitest/node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": 
"sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/vitest/node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": 
"sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + 
"node_modules/vitest/node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/vitest/node_modules/@vitest/mocker": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.8.tgz", + "integrity": "sha512-7guJ/47I6uqfttp33mgo6ga5Gr1VnL58rcqYKyShoRK9ebu8T5Rs6HN3s1NABiBeVTdWNrwUMcHH54uXZBN4zA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "2.1.8", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.12" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/vitest/node_modules/esbuild": { + 
"version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/vitest/node_modules/vite": { + "version": "5.4.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.11.tgz", + "integrity": "sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + 
"terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, "node_modules/w3c-keyname": { "version": "2.2.8", "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", diff --git a/web/ui/package.json b/web/ui/package.json index b79c341434..c10a915c1b 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -22,6 +22,6 @@ "prettier": "^3.4.2", "ts-jest": "^29.2.2", "typescript": "^5.6.2", - "vite": "^5.4.8" + "vite": "^6.0.3" } } From d33019eef4e6290f02f78ebc6b64bfbcf75bfc2d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:39:46 +0100 Subject: [PATCH 1100/1397] chore(deps): bump @fortawesome/react-fontawesome in /web/ui/react-app (#15623) Bumps [@fortawesome/react-fontawesome](https://github.com/FortAwesome/react-fontawesome) from 0.2.0 to 0.2.2. - [Release notes](https://github.com/FortAwesome/react-fontawesome/releases) - [Changelog](https://github.com/FortAwesome/react-fontawesome/blob/0.2.x/CHANGELOG.md) - [Commits](https://github.com/FortAwesome/react-fontawesome/compare/0.2.0...0.2.2) --- updated-dependencies: - dependency-name: "@fortawesome/react-fontawesome" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 +++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 76a525d114..344e00c15c 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -18,7 +18,7 @@ "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", - "@fortawesome/react-fontawesome": "0.2.0", + "@fortawesome/react-fontawesome": "0.2.2", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", @@ -2757,9 +2757,10 @@ } }, "node_modules/@fortawesome/react-fontawesome": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@fortawesome/react-fontawesome/-/react-fontawesome-0.2.0.tgz", - "integrity": "sha512-uHg75Rb/XORTtVt7OS9WoK8uM276Ufi7gCzshVWkUJbHhh3svsUUeqXerrM96Wm7fRiDzfKRwSoahhMIkGAYHw==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@fortawesome/react-fontawesome/-/react-fontawesome-0.2.2.tgz", + "integrity": "sha512-EnkrprPNqI6SXJl//m29hpaNzOp1bruISWaOiRtkMi/xSvHJlzc2j2JAYS7egxt/EbjSNV/k6Xy0AQI6vB2+1g==", + "license": "MIT", "dependencies": { "prop-types": "^15.8.1" }, diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index f0f516e497..c5d7473462 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -13,7 +13,7 @@ "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", - "@fortawesome/react-fontawesome": "0.2.0", + "@fortawesome/react-fontawesome": "0.2.2", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", From f4512469c07bc1a97432b0a3772f31efbcb857a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:40:09 +0100 Subject: [PATCH 1101/1397] chore(deps): bump downshift from 9.0.6 to 9.0.8 in /web/ui/react-app (#15627) Bumps [downshift](https://github.com/downshift-js/downshift) from 9.0.6 to 9.0.8. - [Release notes](https://github.com/downshift-js/downshift/releases) - [Changelog](https://github.com/downshift-js/downshift/blob/master/CHANGELOG.md) - [Commits](https://github.com/downshift-js/downshift/compare/v9.0.6...v9.0.8) --- updated-dependencies: - dependency-name: downshift dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 8 ++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 344e00c15c..0d210ad287 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -27,7 +27,7 @@ "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^9.0.6", + "downshift": "^9.0.8", "http-proxy-middleware": "^3.0.0", "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", @@ -8542,9 +8542,9 @@ "dev": true }, "node_modules/downshift": { - "version": "9.0.6", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-9.0.6.tgz", - "integrity": "sha512-lkqWh0eb34XuH+3z3/BH/LGVRV7ur0rielSlxtlQKsjAFF/wc/c0wsM9phUGXyzK2g1QWHoNHQyc+vVAheI17Q==", + "version": "9.0.8", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-9.0.8.tgz", + "integrity": "sha512-59BWD7+hSUQIM1DeNPLirNNnZIO9qMdIK5GQ/Uo8q34gT4B78RBlb9dhzgnh0HfQTJj4T/JKYD8KoLAlMWnTsA==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.24.5", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 
c5d7473462..6a7caa648a 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -22,7 +22,7 @@ "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^9.0.6", + "downshift": "^9.0.8", "http-proxy-middleware": "^3.0.0", "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", From ee33890de50d4083b3aa5af0265a984bd76af201 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:40:30 +0100 Subject: [PATCH 1102/1397] chore(deps): bump @testing-library/react in /web/ui (#15588) Bumps [@testing-library/react](https://github.com/testing-library/react-testing-library) from 16.0.1 to 16.1.0. - [Release notes](https://github.com/testing-library/react-testing-library/releases) - [Changelog](https://github.com/testing-library/react-testing-library/blob/main/CHANGELOG.md) - [Commits](https://github.com/testing-library/react-testing-library/compare/v16.0.1...v16.1.0) --- updated-dependencies: - dependency-name: "@testing-library/react" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index d204d527b3..a5ed81f1bf 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -33,7 +33,7 @@ "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.6.3", - "@testing-library/react": "^16.0.1", + "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.6", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c329352d00..82ec2e1701 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -47,7 +47,7 @@ "@tabler/icons-react": "^3.19.0", "@tanstack/react-query": "^5.62.0", "@testing-library/jest-dom": "^6.6.3", - "@testing-library/react": "^16.0.1", + "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", "@uiw/react-codemirror": "^4.23.6", @@ -2752,9 +2752,9 @@ "license": "MIT" }, "node_modules/@testing-library/react": { - "version": "16.0.1", - "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.0.1.tgz", - "integrity": "sha512-dSmwJVtJXmku+iocRhWOUFbrERC76TX2Mnf0ATODz8brzAZrMBbzLwQixlBSanZxR6LddK3eiwpSFZgDET1URg==", + "version": "16.1.0", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.1.0.tgz", + "integrity": "sha512-Q2ToPvg0KsVL0ohND9A3zLJWcOXXcO8IDu3fj11KhNt0UlCWyFyvnCIBkd12tidB2lkiVRG8VFqdhcqhqnAQtg==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.5" @@ -2764,10 +2764,10 @@ }, "peerDependencies": { "@testing-library/dom": "^10.0.0", - "@types/react": "^18.0.0", - "@types/react-dom": "^18.0.0", - "react": "^18.0.0", - "react-dom": "^18.0.0" 
+ "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" }, "peerDependenciesMeta": { "@types/react": { From 4cbf44e9dd815305d98aa9fe0b072fd50841a842 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:40:45 +0100 Subject: [PATCH 1103/1397] chore(deps): bump @floating-ui/dom from 1.6.10 to 1.6.12 in /web/ui (#15618) Signed-off-by: Ben Kochie --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index a5ed81f1bf..01c0fa6d90 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -17,7 +17,7 @@ "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", - "@floating-ui/dom": "^1.6.7", + "@floating-ui/dom": "^1.6.12", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.15.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 82ec2e1701..c0a635a27d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -31,7 +31,7 @@ "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.34.1", - "@floating-ui/dom": "^1.6.7", + "@floating-ui/dom": "^1.6.12", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.15.0", @@ -1444,13 +1444,13 @@ } }, "node_modules/@floating-ui/dom": { - "version": "1.6.10", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.10.tgz", - "integrity": "sha512-fskgCFv8J8OamCmyun8MfjB1Olfn+uZKjOKZ0vhYF3gRmEUXcGOjxWL8bBr7i4kIuPZ2KD2S3EUIOxnjC8kl2A==", + "version": "1.6.12", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz", + "integrity": 
"sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==", "license": "MIT", "dependencies": { "@floating-ui/core": "^1.6.0", - "@floating-ui/utils": "^0.2.7" + "@floating-ui/utils": "^0.2.8" } }, "node_modules/@floating-ui/react": { From da53bad3f04c86f10923c547175ab66ec4731f26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 20:41:20 +0100 Subject: [PATCH 1104/1397] chore(deps): bump @codemirror/commands in /web/ui/react-app (#15612) Signed-off-by: Ben Kochie --- web/ui/react-app/package-lock.json | 8 ++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 0d210ad287..dfa371b6f6 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -9,7 +9,7 @@ "version": "0.300.0", "dependencies": { "@codemirror/autocomplete": "^6.17.0", - "@codemirror/commands": "^6.6.0", + "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.8", @@ -2255,9 +2255,9 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.6.0", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.6.0.tgz", - "integrity": "sha512-qnY+b7j1UNcTS31Eenuc/5YJB6gQOzkUoNmJQc0rznwqSRpeaWWpjkWy2C/MPTcePpsKJEM26hXrOXl1+nceXg==", + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.7.1.tgz", + "integrity": "sha512-llTrboQYw5H4THfhN4U3qCnSZ1SOJ60ohhz+SzU0ADGtwlc533DtklQP0vSFaQuCPDn3BPpOd1GbbnUtwNjsrw==", "license": "MIT", "dependencies": { "@codemirror/language": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 6a7caa648a..1af43016bd 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -4,7 +4,7 @@ "private": true, "dependencies": { 
"@codemirror/autocomplete": "^6.17.0", - "@codemirror/commands": "^6.6.0", + "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.8", From f7fec4ddade7505f750c7cce55eb4784b6c17f19 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 21:30:28 +0100 Subject: [PATCH 1105/1397] chore(deps): bump @nexucis/fuzzy in /web/ui/react-app (#15629) Signed-off-by: Ben Kochie --- web/ui/react-app/package-lock.json | 15 +++++++++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index dfa371b6f6..52a9584481 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -22,7 +22,7 @@ "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", - "@nexucis/fuzzy": "^0.4.1", + "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", @@ -4214,9 +4214,10 @@ "license": "MIT" }, "node_modules/@nexucis/fuzzy": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.4.1.tgz", - "integrity": "sha512-oe+IW6ELwVGYL3340M+nKIT1exZizOjxdUFlTs36BqzxTENBbynG+cCWr4RNaUQF3bV78NspKwTBpTlnYADrTA==" + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.5.1.tgz", + "integrity": "sha512-+swL9itqBe1rx5Pr8ihaIS7STOeFI90HpOFF8y/3wo3ryTxKs0Hf4xc+wiA4yi9nrY4wo3VC8HJOxNiekSBE4w==", + "license": "MIT" }, "node_modules/@nexucis/kvsearch": { "version": "0.8.1", @@ -4226,6 +4227,12 @@ "@nexucis/fuzzy": "^0.4.1" } }, + "node_modules/@nexucis/kvsearch/node_modules/@nexucis/fuzzy": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.4.1.tgz", + "integrity": 
"sha512-oe+IW6ELwVGYL3340M+nKIT1exZizOjxdUFlTs36BqzxTENBbynG+cCWr4RNaUQF3bV78NspKwTBpTlnYADrTA==", + "license": "MIT" + }, "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { "version": "5.1.1-v1", "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 1af43016bd..5d9010d075 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -17,7 +17,7 @@ "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", - "@nexucis/fuzzy": "^0.4.1", + "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", From b10d5e02fb6e30eb2aa6d0003096f8570a716243 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 21:30:52 +0100 Subject: [PATCH 1106/1397] chore(deps): bump sass from 1.77.6 to 1.82.0 in /web/ui/react-app (#15617) Signed-off-by: Ben Kochie --- web/ui/react-app/package-lock.json | 373 ++++++++++++++++++++++++++++- web/ui/react-app/package.json | 2 +- 2 files changed, 365 insertions(+), 10 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 52a9584481..a8e2b755ef 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -43,7 +43,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.13.0", - "sass": "1.77.6", + "sass": "1.82.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, @@ -4299,6 +4299,302 @@ "node": ">= 8" } }, + "node_modules/@parcel/watcher": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.0.tgz", + "integrity": "sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ==", + "hasInstallScript": true, + 
"license": "MIT", + "optional": true, + "dependencies": { + "detect-libc": "^1.0.3", + "is-glob": "^4.0.3", + "micromatch": "^4.0.5", + "node-addon-api": "^7.0.0" + }, + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "@parcel/watcher-android-arm64": "2.5.0", + "@parcel/watcher-darwin-arm64": "2.5.0", + "@parcel/watcher-darwin-x64": "2.5.0", + "@parcel/watcher-freebsd-x64": "2.5.0", + "@parcel/watcher-linux-arm-glibc": "2.5.0", + "@parcel/watcher-linux-arm-musl": "2.5.0", + "@parcel/watcher-linux-arm64-glibc": "2.5.0", + "@parcel/watcher-linux-arm64-musl": "2.5.0", + "@parcel/watcher-linux-x64-glibc": "2.5.0", + "@parcel/watcher-linux-x64-musl": "2.5.0", + "@parcel/watcher-win32-arm64": "2.5.0", + "@parcel/watcher-win32-ia32": "2.5.0", + "@parcel/watcher-win32-x64": "2.5.0" + } + }, + "node_modules/@parcel/watcher-android-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz", + "integrity": "sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-darwin-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz", + "integrity": "sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + 
"node_modules/@parcel/watcher-darwin-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz", + "integrity": "sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-freebsd-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz", + "integrity": "sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz", + "integrity": "sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz", + "integrity": "sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": 
{ + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz", + "integrity": "sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-arm64-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz", + "integrity": "sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-glibc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.0.tgz", + "integrity": "sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-linux-x64-musl": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.0.tgz", + "integrity": "sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA==", + "cpu": [ + "x64" + 
], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-arm64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz", + "integrity": "sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-ia32": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz", + "integrity": "sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/@parcel/watcher-win32-x64": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz", + "integrity": "sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -6015,6 +6311,7 @@ "version": "3.1.3", "resolved": 
"https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -6692,6 +6989,7 @@ "version": "2.3.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, "engines": { "node": ">=8" }, @@ -7107,6 +7405,7 @@ "version": "3.6.0", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -8313,6 +8612,19 @@ "npm": "1.2.8000 || >= 1.4.16" } }, + "node_modules/detect-libc": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", + "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "license": "Apache-2.0", + "optional": true, + "bin": { + "detect-libc": "bin/detect-libc.js" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/detect-newline": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", @@ -10565,6 +10877,7 @@ "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, "dependencies": { "is-glob": "^4.0.1" }, @@ -11175,9 +11488,10 @@ } }, "node_modules/immutable": { - "version": "4.3.5", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.5.tgz", - "integrity": "sha512-8eabxkth9gZatlwl5TBuJnCsoTADlL6ftEr7A4qgdaTsPyreilDSnUk57SO+jfKcNtxPa22U5KK6DSeAYhpBJw==" + 
"version": "5.0.3", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.0.3.tgz", + "integrity": "sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw==", + "license": "MIT" }, "node_modules/import-fresh": { "version": "3.3.0", @@ -11345,6 +11659,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, "dependencies": { "binary-extensions": "^2.0.0" }, @@ -16204,6 +16519,13 @@ "tslib": "^2.0.3" } }, + "node_modules/node-addon-api": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", + "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", + "license": "MIT", + "optional": true + }, "node_modules/node-fetch": { "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", @@ -16249,6 +16571,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, "engines": { "node": ">=0.10.0" } @@ -20275,6 +20598,7 @@ "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, "dependencies": { "picomatch": "^2.2.1" }, @@ -20875,13 +21199,13 @@ "dev": true }, "node_modules/sass": { - "version": "1.77.6", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz", - "integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==", + "version": "1.82.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.82.0.tgz", + 
"integrity": "sha512-j4GMCTa8elGyN9A7x7bEglx0VgSpNUG4W4wNedQ33wSMdnkqQCT8HTwOaVSV4e6yQovcu/3Oc4coJP/l0xhL2Q==", "license": "MIT", "dependencies": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", + "chokidar": "^4.0.0", + "immutable": "^5.0.2", "source-map-js": ">=0.6.2 <2.0.0" }, "bin": { @@ -20889,6 +21213,9 @@ }, "engines": { "node": ">=14.0.0" + }, + "optionalDependencies": { + "@parcel/watcher": "^2.4.1" } }, "node_modules/sass-loader": { @@ -20929,6 +21256,34 @@ } } }, + "node_modules/sass/node_modules/chokidar": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.1.tgz", + "integrity": "sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA==", + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/sass/node_modules/readdirp": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.0.2.tgz", + "integrity": "sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA==", + "license": "MIT", + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, "node_modules/sax": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 5d9010d075..7f0198d611 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -38,7 +38,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.13.0", - "sass": "1.77.6", + "sass": "1.82.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, From 077d4edcc646110573d867abf840ff64c3afe9e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 
11 Dec 2024 21:31:35 +0100 Subject: [PATCH 1107/1397] chore(deps): bump @lezer/common from 1.2.1 to 1.2.3 in /web/ui/react-app (#15626) Bumps [@lezer/common](https://github.com/lezer-parser/common) from 1.2.1 to 1.2.3. - [Changelog](https://github.com/lezer-parser/common/blob/main/CHANGELOG.md) - [Commits](https://github.com/lezer-parser/common/compare/1.2.1...1.2.3) --- updated-dependencies: - dependency-name: "@lezer/common" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 +++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index a8e2b755ef..1f13636232 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -19,7 +19,7 @@ "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", "@fortawesome/react-fontawesome": "0.2.2", - "@lezer/common": "^1.2.1", + "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", @@ -4186,9 +4186,10 @@ "dev": true }, "node_modules/@lezer/common": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", - "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==" + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz", + "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==", + "license": "MIT" }, "node_modules/@lezer/highlight": { "version": "1.2.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 7f0198d611..181c0478e7 100644 --- a/web/ui/react-app/package.json +++ 
b/web/ui/react-app/package.json @@ -14,7 +14,7 @@ "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", "@fortawesome/react-fontawesome": "0.2.2", - "@lezer/common": "^1.2.1", + "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", From 28e611a2e6010f3da9ab463a8fb6acffdc3402ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:32:43 -0600 Subject: [PATCH 1108/1397] chore(deps): bump http-proxy-middleware in /web/ui/react-app (#15610) Bumps [http-proxy-middleware](https://github.com/chimurai/http-proxy-middleware) from 3.0.0 to 3.0.3. - [Release notes](https://github.com/chimurai/http-proxy-middleware/releases) - [Changelog](https://github.com/chimurai/http-proxy-middleware/blob/master/CHANGELOG.md) - [Commits](https://github.com/chimurai/http-proxy-middleware/compare/v3.0.0...v3.0.3) --- updated-dependencies: - dependency-name: http-proxy-middleware dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 48 ++++++++++++++---------------- web/ui/react-app/package.json | 2 +- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 1f13636232..d408ec45d6 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -28,7 +28,7 @@ "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.8", - "http-proxy-middleware": "^3.0.0", + "http-proxy-middleware": "^3.0.3", "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.30.1", @@ -5303,9 +5303,10 @@ "dev": true }, "node_modules/@types/http-proxy": { - "version": "1.17.14", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.14.tgz", - "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", + "version": "1.17.15", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.15.tgz", + "integrity": "sha512-25g5atgiVNTIv0LBDTg1H74Hvayx0ajtJPLLcYE3whFv75J0pWNtOBzaXJQgDTmrX1bx5U9YC2w/n65BN1HwRQ==", + "license": "MIT", "dependencies": { "@types/node": "*" } @@ -8454,11 +8455,12 @@ } }, "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -11389,17 +11391,17 @@ } }, "node_modules/http-proxy-middleware": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-3.0.0.tgz", - "integrity": "sha512-36AV1fIaI2cWRzHo+rbcxhe3M3jUDCNzc4D5zRl57sEWRAxdXYtw7FSQKYY6PDKssiAKjLYypbssHk+xs/kMXw==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-3.0.3.tgz", + "integrity": "sha512-usY0HG5nyDUwtqpiZdETNbmKtw3QQ1jwYFZ9wi5iHzX2BcILwQKtYDJPo7XHTsu5Z0B2Hj3W9NNnbd+AjFWjqg==", "license": "MIT", "dependencies": { - "@types/http-proxy": "^1.17.10", - "debug": "^4.3.4", + "@types/http-proxy": "^1.17.15", + "debug": "^4.3.6", "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.5" + "is-glob": "^4.0.3", + "is-plain-object": "^5.0.0", + "micromatch": "^4.0.8" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -11891,6 +11893,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "dev": true, "engines": { "node": ">=10" }, @@ -16386,9 +16389,10 @@ "dev": true }, "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" }, "node_modules/multicast-dns": { "version": "7.2.5", @@ -21430,12 +21434,6 @@ "node": ">= 0.8" } }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, "node_modules/serialize-javascript": { "version": "6.0.2", 
"resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 181c0478e7..98a71012f4 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -23,7 +23,7 @@ "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.8", - "http-proxy-middleware": "^3.0.0", + "http-proxy-middleware": "^3.0.3", "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.30.1", From 8df054894468373152b5659ee422c570e53eba79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:14:29 +0100 Subject: [PATCH 1109/1397] chore(deps): bump moment-timezone in /web/ui/react-app (#15641) Bumps [moment-timezone](https://github.com/moment/moment-timezone) from 0.5.45 to 0.5.46. - [Release notes](https://github.com/moment/moment-timezone/releases) - [Changelog](https://github.com/moment/moment-timezone/blob/develop/changelog.md) - [Commits](https://github.com/moment/moment-timezone/compare/0.5.45...0.5.46) --- updated-dependencies: - dependency-name: moment-timezone dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 +++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index d408ec45d6..736e75be8f 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -32,7 +32,7 @@ "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.30.1", - "moment-timezone": "^0.5.45", + "moment-timezone": "^0.5.46", "popper.js": "^1.14.3", "react": "^17.0.2", "react-copy-to-clipboard": "^5.1.0", @@ -16357,9 +16357,10 @@ } }, "node_modules/moment-timezone": { - "version": "0.5.45", - "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.45.tgz", - "integrity": "sha512-HIWmqA86KcmCAhnMAN0wuDOARV/525R2+lOLotuGFzn4HO+FH+/645z2wx0Dt3iDv6/p61SIvKnDstISainhLQ==", + "version": "0.5.46", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.46.tgz", + "integrity": "sha512-ZXm9b36esbe7OmdABqIWJuBBiLLwAjrN7CE+7sYdCCx82Nabt1wHDj8TVseS59QIlfFPbOoiBPm6ca9BioG4hw==", + "license": "MIT", "dependencies": { "moment": "^2.29.4" }, diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 98a71012f4..c82915499a 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -27,7 +27,7 @@ "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.30.1", - "moment-timezone": "^0.5.45", + "moment-timezone": "^0.5.46", "popper.js": "^1.14.3", "react": "^17.0.2", "react-copy-to-clipboard": "^5.1.0", From cc4fcc13289ec59162bb7f39d8c8589e89edec42 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:25:20 +0100 Subject: [PATCH 1110/1397] chore(deps-dev): bump nock from 13.5.5 to 13.5.6 in /web/ui (#15642) Bumps 
[nock](https://github.com/nock/nock) from 13.5.5 to 13.5.6. - [Release notes](https://github.com/nock/nock/releases) - [Changelog](https://github.com/nock/nock/blob/main/CHANGELOG.md) - [Commits](https://github.com/nock/nock/compare/v13.5.5...v13.5.6) --- updated-dependencies: - dependency-name: nock dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 3aee6538a8..726f4f0daf 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -43,7 +43,7 @@ "@lezer/lr": "^1.4.2", "eslint-plugin-prettier": "^5.1.3", "isomorphic-fetch": "^3.0.0", - "nock": "^13.5.4" + "nock": "^13.5.6" }, "peerDependencies": { "@codemirror/autocomplete": "^6.4.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c0a635a27d..d8f79be9ba 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -164,7 +164,7 @@ "@lezer/lr": "^1.4.2", "eslint-plugin-prettier": "^5.1.3", "isomorphic-fetch": "^3.0.0", - "nock": "^13.5.4" + "nock": "^13.5.6" }, "engines": { "node": ">=12.0.0" @@ -7161,9 +7161,9 @@ "license": "MIT" }, "node_modules/nock": { - "version": "13.5.5", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.5.5.tgz", - "integrity": "sha512-XKYnqUrCwXC8DGG1xX4YH5yNIrlh9c065uaMZZHUoeUUINTOyt+x/G+ezYk0Ft6ExSREVIs+qBJDK503viTfFA==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.5.6.tgz", + "integrity": "sha512-o2zOYiCpzRqSzPj0Zt/dQ/DqZeYoaQ7TUonc/xUPjCGl9WeHpNbxgVvOquXYAaJzI0M9BXV3HTzG0p8IUAbBTQ==", "dev": true, "license": "MIT", "dependencies": { From 
00e0cdc4f977728d89f2e8ea1fe4162c107b2dda Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:32:13 +0100 Subject: [PATCH 1111/1397] chore(deps): bump @mantine/dates from 7.13.1 to 7.15.0 in /web/ui (#15643) Bumps [@mantine/dates](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/dates) from 7.13.1 to 7.15.0. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.15.0/packages/@mantine/dates) --- updated-dependencies: - dependency-name: "@mantine/dates" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 01c0fa6d90..0006b5026e 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -22,7 +22,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.15.0", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.13.1", + "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.13.1", "@microsoft/fetch-event-source": "^2.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d8f79be9ba..72a3c84c05 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -36,7 +36,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.15.0", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.13.1", + "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.13.1", "@microsoft/fetch-event-source": "^2.0.1", @@ -2169,18 +2169,19 @@ } }, 
"node_modules/@mantine/dates": { - "version": "7.13.1", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.13.1.tgz", - "integrity": "sha512-KzzAehnftPAiGhJhOaRcWBuQ5+f5HrqnpNjH2/0KN+dv3gUfitAbapXOmCYOTdzS9Zk+RqqsD5VKvsbr1giXtQ==", + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.15.0.tgz", + "integrity": "sha512-EM1Tp29DH6i6XH+fPkpNsjUQA32u+4yqOggFtytvuHgDITOj82xh2NIZGSyGH/rzkcy/Bh/t65pK3SoZJGTY6Q==", + "license": "MIT", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "7.13.1", - "@mantine/hooks": "7.13.1", + "@mantine/core": "7.15.0", + "@mantine/hooks": "7.15.0", "dayjs": ">=1.0.0", - "react": "^18.2.0", - "react-dom": "^18.2.0" + "react": "^18.x || ^19.x", + "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/hooks": { From ec95602af9be9fc89508c1c6f969a0c6c8c9fb21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:34:49 +0100 Subject: [PATCH 1112/1397] chore(deps): bump @lezer/highlight in /web/ui/react-app (#15660) Bumps [@lezer/highlight](https://github.com/lezer-parser/highlight) from 1.2.0 to 1.2.1. - [Changelog](https://github.com/lezer-parser/highlight/blob/main/CHANGELOG.md) - [Commits](https://github.com/lezer-parser/highlight/compare/1.2.0...1.2.1) --- updated-dependencies: - dependency-name: "@lezer/highlight" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 +++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 736e75be8f..bb98a37f98 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -20,7 +20,7 @@ "@fortawesome/free-solid-svg-icons": "6.5.2", "@fortawesome/react-fontawesome": "0.2.2", "@lezer/common": "^1.2.3", - "@lezer/highlight": "^1.2.0", + "@lezer/highlight": "^1.2.1", "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.8.1", @@ -4192,9 +4192,10 @@ "license": "MIT" }, "node_modules/@lezer/highlight": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", - "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz", + "integrity": "sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==", + "license": "MIT", "dependencies": { "@lezer/common": "^1.0.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c82915499a..8ad7df3e49 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -15,7 +15,7 @@ "@fortawesome/free-solid-svg-icons": "6.5.2", "@fortawesome/react-fontawesome": "0.2.2", "@lezer/common": "^1.2.3", - "@lezer/highlight": "^1.2.0", + "@lezer/highlight": "^1.2.1", "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.8.1", From d6a24b2e798a5b6998f8e69484472319d7782abb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:35:21 -0600 Subject: [PATCH 1113/1397] 
chore(deps): bump @mantine/notifications in /web/ui (#15658) Bumps [@mantine/notifications](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/notifications) from 7.13.1 to 7.15.0. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.15.0/packages/@mantine/notifications) --- updated-dependencies: - dependency-name: "@mantine/notifications" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 28 +++++++++++++++------------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0006b5026e..ce9f22cd4a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -24,7 +24,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.13.1", + "@mantine/notifications": "^7.15.0", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 72a3c84c05..0be1155a69 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -38,7 +38,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.13.1", + "@mantine/notifications": "^7.15.0", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", @@ -2194,26 +2194,28 @@ } }, "node_modules/@mantine/notifications": { - "version": "7.13.1", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.13.1.tgz", - "integrity": 
"sha512-Lc66wRar/nqADoaSlLHntREWbMlDDVs/Sabla2ac/V8jftLOnQpVPMefMpFVGYNJdhT3mG/9bguZV5K7pkjSXQ==", + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.15.0.tgz", + "integrity": "sha512-F1g2mFRUTk++ATsbsi8T2WaTRhejB05FusvG3iHC4/a1+0K5Vjh2Armt3VNY0vsUR3V5RdAnP8uZDlMhM7YTQQ==", + "license": "MIT", "dependencies": { - "@mantine/store": "7.13.1", + "@mantine/store": "7.15.0", "react-transition-group": "4.4.5" }, "peerDependencies": { - "@mantine/core": "7.13.1", - "@mantine/hooks": "7.13.1", - "react": "^18.2.0", - "react-dom": "^18.2.0" + "@mantine/core": "7.15.0", + "@mantine/hooks": "7.15.0", + "react": "^18.x || ^19.x", + "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/store": { - "version": "7.13.1", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.13.1.tgz", - "integrity": "sha512-/ZiVU8oFMkzSNrXqAvxb9ZfHWgVg7E8apUEQCzBh9sxgxdVoM9Y1+2YqOoi885hxskmPpkmGP+VGOJnQ6OKJig==", + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.15.0.tgz", + "integrity": "sha512-XzQcVFTkD0XspPNsB2NivzbAeZUrLFGO5j8hvKcmGGvUWYlR99GbL7q13ujwJQnNpElqAPSeuN161tnbCqB+Ng==", + "license": "MIT", "peerDependencies": { - "react": "^18.2.0" + "react": "^18.x || ^19.x" } }, "node_modules/@marijn/find-cluster-break": { From bb54eaff97959cf196a0e3106be4c4a79215848d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:36:03 -0600 Subject: [PATCH 1114/1397] chore(deps): bump @nexucis/kvsearch in /web/ui/react-app (#15659) Bumps [@nexucis/kvsearch](https://github.com/Nexucis/kvsearch) from 0.8.1 to 0.9.1. - [Release notes](https://github.com/Nexucis/kvsearch/releases) - [Commits](https://github.com/Nexucis/kvsearch/compare/kvsearch-0.8.1...kvsearch-0.9.1) --- updated-dependencies: - dependency-name: "@nexucis/kvsearch" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 17 ++++++----------- web/ui/react-app/package.json | 2 +- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index bb98a37f98..74b0a8da6a 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -23,7 +23,7 @@ "@lezer/highlight": "^1.2.1", "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", - "@nexucis/kvsearch": "^0.8.1", + "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", @@ -4222,19 +4222,14 @@ "license": "MIT" }, "node_modules/@nexucis/kvsearch": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.8.1.tgz", - "integrity": "sha512-YACEbxIKE+bHn40YYqZFJIb6lGuIeEHt/HvX3capg5mU9QWJqHg5yienqQgy3vZfx4oMBPmgQhTQiPGd2fS7Jg==", + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.9.1.tgz", + "integrity": "sha512-+nUvULLBTinsvPrui8ZAYj+K0zqQsVxCxKvOlPRIRD5qOGiNkDrbNIFTOxgiAa1jgcPdLpCRMU5nfNZ2As9pIQ==", + "license": "MIT", "dependencies": { - "@nexucis/fuzzy": "^0.4.1" + "@nexucis/fuzzy": "^0.5.1" } }, - "node_modules/@nexucis/kvsearch/node_modules/@nexucis/fuzzy": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.4.1.tgz", - "integrity": "sha512-oe+IW6ELwVGYL3340M+nKIT1exZizOjxdUFlTs36BqzxTENBbynG+cCWr4RNaUQF3bV78NspKwTBpTlnYADrTA==", - "license": "MIT" - }, "node_modules/@nicolo-ribaudo/eslint-scope-5-internals": { "version": "5.1.1-v1", "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 8ad7df3e49..8f8d2dba77 100644 --- a/web/ui/react-app/package.json 
+++ b/web/ui/react-app/package.json @@ -18,7 +18,7 @@ "@lezer/highlight": "^1.2.1", "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", - "@nexucis/kvsearch": "^0.8.1", + "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.55.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", From 032391a5edd800c8945d0ce63d7540d50789bc4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:37:00 -0600 Subject: [PATCH 1115/1397] chore(deps): bump @codemirror/autocomplete in /web/ui/react-app (#15649) Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.17.0 to 6.18.3. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.17.0...6.18.3) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 8 ++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 74b0a8da6a..0faa78421b 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -8,7 +8,7 @@ "name": "@prometheus-io/app", "version": "0.300.0", "dependencies": { - "@codemirror/autocomplete": "^6.17.0", + "@codemirror/autocomplete": "^6.18.3", "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", @@ -2237,9 +2237,9 @@ "dev": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.17.0", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz", - "integrity": 
"sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==", + "version": "6.18.3", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.3.tgz", + "integrity": "sha512-1dNIOmiM0z4BIBwxmxEfA1yoxh1MF/6KPBbh20a5vphGV0ictKlgQsbJs6D6SkR6iJpGbpwRsa6PFMNlg9T9pQ==", "license": "MIT", "dependencies": { "@codemirror/language": "^6.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 8f8d2dba77..f444bfe06e 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -3,7 +3,7 @@ "version": "0.300.0", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.17.0", + "@codemirror/autocomplete": "^6.18.3", "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.1", From 4b142dc7a11b12b4199be05b767eb63cf9400d82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:37:16 -0600 Subject: [PATCH 1116/1397] chore(deps-dev): bump typescript from 5.6.2 to 5.7.2 in /web/ui (#15650) Bumps [typescript](https://github.com/microsoft/TypeScript) from 5.6.2 to 5.7.2. - [Release notes](https://github.com/microsoft/TypeScript/releases) - [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) - [Commits](https://github.com/microsoft/TypeScript/compare/v5.6.2...v5.7.2) --- updated-dependencies: - dependency-name: typescript dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/package-lock.json | 9 +++++---- web/ui/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 0be1155a69..a76fb165c7 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -18,7 +18,7 @@ "eslint-config-prettier": "^9.1.0", "prettier": "^3.4.2", "ts-jest": "^29.2.2", - "typescript": "^5.6.2", + "typescript": "^5.7.2", "vite": "^6.0.3" } }, @@ -8941,10 +8941,11 @@ } }, "node_modules/typescript": { - "version": "5.6.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz", - "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", + "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", "dev": true, + "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" diff --git a/web/ui/package.json b/web/ui/package.json index c10a915c1b..ad531e67a3 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -21,7 +21,7 @@ "eslint-config-prettier": "^9.1.0", "prettier": "^3.4.2", "ts-jest": "^29.2.2", - "typescript": "^5.6.2", + "typescript": "^5.7.2", "vite": "^6.0.3" } } From 9362ba32f52fc8f7d0aae45ec9dadc6bc94e6344 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:38:32 -0600 Subject: [PATCH 1117/1397] chore(deps-dev): bump @types/node in /web/ui/react-app (#15651) Bumps [@types/node](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node) from 20.14.13 to 22.10.2. 
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/node) --- updated-dependencies: - dependency-name: "@types/node" dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 17 +++++++++-------- web/ui/react-app/package.json | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 0faa78421b..3d62f5b4ae 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -53,7 +53,7 @@ "@types/flot": "0.0.36", "@types/jest": "^29.5.14", "@types/jquery": "^3.5.32", - "@types/node": "^20.14.9", + "@types/node": "^22.10.2", "@types/react": "^17.0.71", "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-dom": "^17.0.25", @@ -5403,12 +5403,12 @@ "dev": true }, "node_modules/@types/node": { - "version": "20.14.13", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.13.tgz", - "integrity": "sha512-+bHoGiZb8UiQ0+WEtmph2IWQCjIqg8MDZMAV+ppRRhUZnquF5mQkP/9vpSwJClEiSM/C7fZZExPzfU0vJTyp8w==", + "version": "22.10.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.2.tgz", + "integrity": "sha512-Xxr6BBRCAOQixvonOye19wnzyDiUtTeqldOOmj3CkeblonbccA12PFwlufvRdrpjXxqnmUaeiU5EOA+7s5diUQ==", "license": "MIT", "dependencies": { - "undici-types": "~5.26.4" + "undici-types": "~6.20.0" } }, "node_modules/@types/node-forge": { @@ -23103,9 +23103,10 @@ "dev": true }, "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + "version": "6.20.0", + "resolved": 
"https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", + "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "license": "MIT" }, "node_modules/unicode-canonical-property-names-ecmascript": { "version": "2.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index f444bfe06e..a1542a9d55 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -70,7 +70,7 @@ "@types/flot": "0.0.36", "@types/jest": "^29.5.14", "@types/jquery": "^3.5.32", - "@types/node": "^20.14.9", + "@types/node": "^22.10.2", "@types/react": "^17.0.71", "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-dom": "^17.0.25", From 5aa80fb1f8dd7e88b4ef4060e72482c9179a2097 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:38:51 -0600 Subject: [PATCH 1118/1397] chore(deps): bump react-router-dom from 7.0.1 to 7.0.2 in /web/ui (#15652) Bumps [react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom) from 7.0.1 to 7.0.2. - [Release notes](https://github.com/remix-run/react-router/releases) - [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md) - [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@7.0.2/packages/react-router-dom) --- updated-dependencies: - dependency-name: react-router-dom dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 28 +++++++++++++++++----------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index ce9f22cd4a..013df2aa4a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -44,7 +44,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^7.0.1", + "react-router-dom": "^7.0.2", "sanitize-html": "^2.13.1", "uplot": "^1.6.31", "uplot-react": "^1.2.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a76fb165c7..ca61bba35e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -58,7 +58,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.1.2", - "react-router-dom": "^7.0.1", + "react-router-dom": "^7.0.2", "sanitize-html": "^2.13.1", "uplot": "^1.6.31", "uplot-react": "^1.2.2", @@ -2836,7 +2836,8 @@ "node_modules/@types/cookie": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==" + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "license": "MIT" }, "node_modules/@types/estree": { "version": "1.0.6", @@ -4076,6 +4077,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz", "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==", + "license": "MIT", "engines": { "node": ">=18" } @@ -8021,9 +8023,10 @@ } }, "node_modules/react-router": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.0.1.tgz", 
- "integrity": "sha512-WVAhv9oWCNsja5AkK6KLpXJDSJCQizOIyOd4vvB/+eHGbYx5vkhcmcmwWjQ9yqkRClogi+xjEg9fNEOd5EX/tw==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.0.2.tgz", + "integrity": "sha512-m5AcPfTRUcjwmhBzOJGEl6Y7+Crqyju0+TgTQxoS4SO+BkWbhOrcfZNq6wSWdl2BBbJbsAoBUb8ZacOFT+/JlA==", + "license": "MIT", "dependencies": { "@types/cookie": "^0.6.0", "cookie": "^1.0.1", @@ -8044,11 +8047,12 @@ } }, "node_modules/react-router-dom": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.0.1.tgz", - "integrity": "sha512-duBzwAAiIabhFPZfDjcYpJ+f08TMbPMETgq254GWne2NW1ZwRHhZLj7tpSp8KGb7JvZzlLcjGUnqLxpZQVEPng==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.0.2.tgz", + "integrity": "sha512-VJOQ+CDWFDGaWdrG12Nl+d7yHtLaurNgAQZVgaIy7/Xd+DojgmYLosFfZdGz1wpxmjJIAkAMVTKWcvkx1oggAw==", + "license": "MIT", "dependencies": { - "react-router": "7.0.1" + "react-router": "7.0.2" }, "engines": { "node": ">=20.0.0" @@ -8388,7 +8392,8 @@ "node_modules/set-cookie-parser": { "version": "2.7.1", "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", - "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==" + "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", + "license": "MIT" }, "node_modules/shebang-command": { "version": "2.0.0", @@ -8903,7 +8908,8 @@ "node_modules/turbo-stream": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", - "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==" + "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==", + "license": "ISC" }, "node_modules/type-check": { "version": "0.4.0", From 
db36cbcc5e7ea4c5d8f337d6d3b9213b1c004ec2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:46:34 -0600 Subject: [PATCH 1119/1397] chore(deps): bump @tanstack/react-query from 5.62.0 to 5.62.7 in /web/ui (#15654) Bumps [@tanstack/react-query](https://github.com/TanStack/query/tree/HEAD/packages/react-query) from 5.62.0 to 5.62.7. - [Release notes](https://github.com/TanStack/query/releases) - [Commits](https://github.com/TanStack/query/commits/v5.62.7/packages/react-query) --- updated-dependencies: - dependency-name: "@tanstack/react-query" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 013df2aa4a..6709046dd5 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -31,7 +31,7 @@ "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.19.0", - "@tanstack/react-query": "^5.62.0", + "@tanstack/react-query": "^5.62.7", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index ca61bba35e..d3e3ca2867 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -45,7 +45,7 @@ "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.19.0", - "@tanstack/react-query": "^5.62.0", + "@tanstack/react-query": "^5.62.7", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", @@ -2673,20 +2673,22 @@ } }, "node_modules/@tanstack/query-core": { - 
"version": "5.62.0", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.62.0.tgz", - "integrity": "sha512-sx38bGrqF9bop92AXOvzDr0L9fWDas5zXdPglxa9cuqeVSWS7lY6OnVyl/oodfXjgOGRk79IfCpgVmxrbHuFHg==", + "version": "5.62.7", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.62.7.tgz", + "integrity": "sha512-fgpfmwatsrUal6V+8EC2cxZIQVl9xvL7qYa03gsdsCy985UTUlS4N+/3hCzwR0PclYDqisca2AqR1BVgJGpUDA==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@tanstack/react-query": { - "version": "5.62.0", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.62.0.tgz", - "integrity": "sha512-tj2ltjAn2a3fs+Dqonlvs6GyLQ/LKVJE2DVSYW+8pJ3P6/VCVGrfqv5UEchmlP7tLOvvtZcOuSyI2ooVlR5Yqw==", + "version": "5.62.7", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.62.7.tgz", + "integrity": "sha512-+xCtP4UAFDTlRTYyEjLx0sRtWyr5GIk7TZjZwBu4YaNahi3Rt2oMyRqfpfVrtwsqY2sayP4iXVCwmC+ZqqFmuw==", + "license": "MIT", "dependencies": { - "@tanstack/query-core": "5.62.0" + "@tanstack/query-core": "5.62.7" }, "funding": { "type": "github", From 94aa9e944cbcc398f34fa59321a086562e95e33c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 01:08:34 +0100 Subject: [PATCH 1120/1397] chore(deps): bump sanitize-html and @types/sanitize-html (#15663) Bumps [sanitize-html](https://github.com/apostrophecms/sanitize-html) and [@types/sanitize-html](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/sanitize-html). These dependencies needed to be updated together. 
Updates `sanitize-html` from 2.13.0 to 2.13.1 - [Changelog](https://github.com/apostrophecms/sanitize-html/blob/main/CHANGELOG.md) - [Commits](https://github.com/apostrophecms/sanitize-html/compare/2.13.0...2.13.1) Updates `@types/sanitize-html` from 2.11.0 to 2.13.0 - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/sanitize-html) --- updated-dependencies: - dependency-name: sanitize-html dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: "@types/sanitize-html" dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 18 ++++++++++-------- web/ui/react-app/package.json | 4 ++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 3d62f5b4ae..f2d9366ede 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -42,7 +42,7 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.13.0", + "sanitize-html": "^2.13.1", "sass": "1.82.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" @@ -58,7 +58,7 @@ "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.11.0", + "@types/sanitize-html": "^2.13.0", "@types/sinon": "^17.0.3", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", @@ -5522,10 +5522,11 @@ "dev": true }, "node_modules/@types/sanitize-html": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.11.0.tgz", - "integrity": 
"sha512-7oxPGNQHXLHE48r/r/qjn7q0hlrs3kL7oZnGj0Wf/h9tj/6ibFyRkNbsDxaBBZ4XUZ0Dx5LGCyDJ04ytSofacQ==", + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.13.0.tgz", + "integrity": "sha512-X31WxbvW9TjIhZZNyNBZ/p5ax4ti7qsNDBDEnH4zAgmEh35YnFD1UiS6z9Cd34kKm0LslFW0KPmTQzu/oGtsqQ==", "dev": true, + "license": "MIT", "dependencies": { "htmlparser2": "^8.0.0" } @@ -21182,9 +21183,10 @@ "dev": true }, "node_modules/sanitize-html": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.13.0.tgz", - "integrity": "sha512-Xff91Z+4Mz5QiNSLdLWwjgBDm5b1RU6xBT0+12rapjiaR7SwfRdjw8f+6Rir2MXKLrDicRFHdb51hGOAxmsUIA==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.13.1.tgz", + "integrity": "sha512-ZXtKq89oue4RP7abL9wp/9URJcqQNABB5GGJ2acW1sdO8JTVl92f4ygD7Yc9Ze09VAZhnt2zegeU0tbNsdcLYg==", + "license": "MIT", "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index a1542a9d55..8c82a216a2 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -37,7 +37,7 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.13.0", + "sanitize-html": "^2.13.1", "sass": "1.82.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" @@ -75,7 +75,7 @@ "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.11.0", + "@types/sanitize-html": "^2.13.0", "@types/sinon": "^17.0.3", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", From 463dd3ee82e6369b8486a4017c0d17ce1d82c157 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 01:09:10 +0100 Subject: [PATCH 1121/1397] chore(deps): bump 
@codemirror/autocomplete in /web/ui (#15656) Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.18.1 to 6.18.3. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.18.1...6.18.3) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 ++++++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 6709046dd5..5274e30ef1 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -12,7 +12,7 @@ "test": "vitest" }, "dependencies": { - "@codemirror/autocomplete": "^6.18.1", + "@codemirror/autocomplete": "^6.18.3", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 726f4f0daf..5136d036c3 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,7 +33,7 @@ "lru-cache": "^11.0.2" }, "devDependencies": { - "@codemirror/autocomplete": "^6.18.1", + "@codemirror/autocomplete": "^6.18.3", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index d3e3ca2867..a8be55640c 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -26,7 +26,7 @@ "name": "@prometheus-io/mantine-ui", "version": "0.300.0", "dependencies": { - "@codemirror/autocomplete": "^6.18.1", + "@codemirror/autocomplete": 
"^6.18.3", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", @@ -154,7 +154,7 @@ "lru-cache": "^11.0.2" }, "devDependencies": { - "@codemirror/autocomplete": "^6.18.1", + "@codemirror/autocomplete": "^6.18.3", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", @@ -779,9 +779,10 @@ "peer": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.18.1", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.1.tgz", - "integrity": "sha512-iWHdj/B1ethnHRTwZj+C1obmmuCzquH29EbcKr0qIjA9NfDeBDJ7vs+WOHsFeLeflE4o+dHfYndJloMKHUkWUA==", + "version": "6.18.3", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.3.tgz", + "integrity": "sha512-1dNIOmiM0z4BIBwxmxEfA1yoxh1MF/6KPBbh20a5vphGV0ictKlgQsbJs6D6SkR6iJpGbpwRsa6PFMNlg9T9pQ==", + "license": "MIT", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", From f2326995b9a0a75ec28184bd1cfe44d08256b512 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 18:09:43 -0600 Subject: [PATCH 1122/1397] chore(deps-dev): bump ts-jest from 29.2.2 to 29.2.5 in /web/ui/react-app (#15646) Bumps [ts-jest](https://github.com/kulshekhar/ts-jest) from 29.2.2 to 29.2.5. - [Release notes](https://github.com/kulshekhar/ts-jest/releases) - [Changelog](https://github.com/kulshekhar/ts-jest/blob/main/CHANGELOG.md) - [Commits](https://github.com/kulshekhar/ts-jest/compare/v29.2.2...v29.2.5) --- updated-dependencies: - dependency-name: ts-jest dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 50 +++++++++--------------------- web/ui/react-app/package.json | 2 +- 2 files changed, 16 insertions(+), 36 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index f2d9366ede..7119a0492d 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -72,7 +72,7 @@ "prettier": "^2.8.8", "react-scripts": "^5.0.1", "sinon": "^19.0.2", - "ts-jest": "^29.2.2" + "ts-jest": "^29.2.5" }, "optionalDependencies": { "fsevents": "^2.3.3" @@ -21353,13 +21353,11 @@ } }, "node_modules/semver": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", - "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, + "license": "ISC", "bin": { "semver": "bin/semver.js" }, @@ -21367,24 +21365,6 @@ "node": ">=10" } }, - "node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, "node_modules/send": { "version": "0.19.0", "resolved": 
"https://registry.npmjs.org/send/-/send-0.19.0.tgz", @@ -22763,21 +22743,21 @@ "dev": true }, "node_modules/ts-jest": { - "version": "29.2.2", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.2.tgz", - "integrity": "sha512-sSW7OooaKT34AAngP6k1VS669a0HdLxkQZnlC7T76sckGCokXFnvJ3yRlQZGRTAoV5K19HfSgCiSwWOSIfcYlg==", + "version": "29.2.5", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.5.tgz", + "integrity": "sha512-KD8zB2aAZrcKIdGk4OwpJggeLcH1FgrICqDSROWqlnJXGCXK4Mn6FcdK2B6670Xr73lHMG1kHw8R87A0ecZ+vA==", "dev": true, "license": "MIT", "dependencies": { - "bs-logger": "0.x", - "ejs": "^3.0.0", - "fast-json-stable-stringify": "2.x", + "bs-logger": "^0.2.6", + "ejs": "^3.1.10", + "fast-json-stable-stringify": "^2.1.0", "jest-util": "^29.0.0", "json5": "^2.2.3", - "lodash.memoize": "4.x", - "make-error": "1.x", - "semver": "^7.5.3", - "yargs-parser": "^21.0.1" + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.6.3", + "yargs-parser": "^21.1.1" }, "bin": { "ts-jest": "cli.js" diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 8c82a216a2..a00b5c75be 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -89,7 +89,7 @@ "prettier": "^2.8.8", "react-scripts": "^5.0.1", "sinon": "^19.0.2", - "ts-jest": "^29.2.2" + "ts-jest": "^29.2.5" }, "jest": { "snapshotSerializers": [ From abddce3b36f2786b2db7c165f793121f472e0e8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 18:10:58 -0600 Subject: [PATCH 1123/1397] chore(deps): bump @tabler/icons-react from 3.19.0 to 3.24.0 in /web/ui (#15640) Bumps [@tabler/icons-react](https://github.com/tabler/tabler-icons/tree/HEAD/packages/icons-react) from 3.19.0 to 3.24.0. 
- [Release notes](https://github.com/tabler/tabler-icons/releases) - [Commits](https://github.com/tabler/tabler-icons/commits/v3.24.0/packages/icons-react) --- updated-dependencies: - dependency-name: "@tabler/icons-react" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 5274e30ef1..f9dc894064 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -30,7 +30,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.5.0", - "@tabler/icons-react": "^3.19.0", + "@tabler/icons-react": "^3.24.0", "@tanstack/react-query": "^5.62.7", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a8be55640c..f3c0a06b2f 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -44,7 +44,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.300.0", "@reduxjs/toolkit": "^2.5.0", - "@tabler/icons-react": "^3.19.0", + "@tabler/icons-react": "^3.24.0", "@tanstack/react-query": "^5.62.7", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", @@ -2650,20 +2650,22 @@ } }, "node_modules/@tabler/icons": { - "version": "3.19.0", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.19.0.tgz", - "integrity": "sha512-A4WEWqpdbTfnpFEtwXqwAe9qf9sp1yRPvzppqAuwcoF0q5YInqB+JkJtSFToCyBpPVeLxJUxxkapLvt2qQgnag==", + "version": "3.24.0", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.24.0.tgz", + "integrity": 
"sha512-qNis9e90QcdxAGV3wNIeX0Ba2R7ktm0cnqOToKHJfC2kj3fvJwEVLsw63K0/fm7NW8rSZjDSTQRmMnSg8g/wrg==", + "license": "MIT", "funding": { "type": "github", "url": "https://github.com/sponsors/codecalm" } }, "node_modules/@tabler/icons-react": { - "version": "3.19.0", - "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.19.0.tgz", - "integrity": "sha512-AqEWGI0tQWgqo6ZjMO5yJ9sYT8oXLuAM/up0hN9iENS6IdtNZryKrkNSiMgpwweNTpl8wFFG/dAZ959S91A/uQ==", + "version": "3.24.0", + "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.24.0.tgz", + "integrity": "sha512-m9c7TmlcDmKqvZAasG5rv1YvazZDrVEhNdNFa2d1Bzotc0dh+iceFdiZCEcYPDb5UcRyLAMvOaOC9y/5sfMMWw==", + "license": "MIT", "dependencies": { - "@tabler/icons": "3.19.0" + "@tabler/icons": "3.24.0" }, "funding": { "type": "github", From 9f93a2a825abd7c8f50785a3e72f9fc9ca8ba980 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 18:11:43 -0600 Subject: [PATCH 1124/1397] chore(deps): bump react-redux from 9.1.2 to 9.2.0 in /web/ui (#15645) Bumps [react-redux](https://github.com/reduxjs/react-redux) from 9.1.2 to 9.2.0. - [Release notes](https://github.com/reduxjs/react-redux/releases) - [Changelog](https://github.com/reduxjs/react-redux/blob/master/CHANGELOG.md) - [Commits](https://github.com/reduxjs/react-redux/compare/v9.1.2...v9.2.0) --- updated-dependencies: - dependency-name: react-redux dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index f9dc894064..4e4b0d1817 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -43,7 +43,7 @@ "react": "^18.3.1", "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", - "react-redux": "^9.1.2", + "react-redux": "^9.2.0", "react-router-dom": "^7.0.2", "sanitize-html": "^2.13.1", "uplot": "^1.6.31", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f3c0a06b2f..88feb75ebf 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -57,7 +57,7 @@ "react": "^18.3.1", "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", - "react-redux": "^9.1.2", + "react-redux": "^9.2.0", "react-router-dom": "^7.0.2", "sanitize-html": "^2.13.1", "uplot": "^1.6.31", @@ -3011,9 +3011,9 @@ "license": "MIT" }, "node_modules/@types/use-sync-external-store": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.3.tgz", - "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==", + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.6.tgz", + "integrity": "sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==", "license": "MIT" }, "node_modules/@types/yargs": { @@ -7950,17 +7950,17 @@ } }, "node_modules/react-redux": { - "version": "9.1.2", - "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.1.2.tgz", - "integrity": 
"sha512-0OA4dhM1W48l3uzmv6B7TXPCGmokUU4p1M44DGN2/D9a1FjVPukVjER1PcPX97jIg6aUeLq1XJo1IpfbgULn0w==", + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.2.0.tgz", + "integrity": "sha512-ROY9fvHhwOD9ySfrF0wmvu//bKCQ6AeZZq1nJNtbDC+kk5DuSuNX/n6YWYF/SYy7bSba4D4FSz8DJeKY/S/r+g==", "license": "MIT", "dependencies": { - "@types/use-sync-external-store": "^0.0.3", - "use-sync-external-store": "^1.0.0" + "@types/use-sync-external-store": "^0.0.6", + "use-sync-external-store": "^1.4.0" }, "peerDependencies": { - "@types/react": "^18.2.25", - "react": "^18.0", + "@types/react": "^18.2.25 || ^19", + "react": "^18.0 || ^19", "redux": "^5.0.0" }, "peerDependenciesMeta": { @@ -9132,12 +9132,12 @@ } }, "node_modules/use-sync-external-store": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz", - "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.4.0.tgz", + "integrity": "sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==", "license": "MIT", "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "node_modules/util-deprecate": { From 987b42f44d79930bd9576c695c07bc576d99922f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 21:23:07 -0600 Subject: [PATCH 1125/1397] chore(deps): bump @codemirror/lint in /web/ui/react-app (#15647) Bumps [@codemirror/lint](https://github.com/codemirror/lint) from 6.8.1 to 6.8.4. 
- [Changelog](https://github.com/codemirror/lint/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/lint/compare/6.8.1...6.8.4) --- updated-dependencies: - dependency-name: "@codemirror/lint" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 10 +++++----- web/ui/react-app/package.json | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 7119a0492d..3004c65391 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -11,7 +11,7 @@ "@codemirror/autocomplete": "^6.18.3", "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.6", - "@codemirror/lint": "^6.8.1", + "@codemirror/lint": "^6.8.4", "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.35.3", @@ -2281,13 +2281,13 @@ } }, "node_modules/@codemirror/lint": { - "version": "6.8.1", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz", - "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==", + "version": "6.8.4", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.4.tgz", + "integrity": "sha512-u4q7PnZlJUojeRe8FJa/njJcMctISGgPQ4PnWsd9268R4ZTtU+tfFYmwkBvgcrK2+QQ8tYFVALVb5fVJykKc5A==", "license": "MIT", "dependencies": { "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", + "@codemirror/view": "^6.35.0", "crelt": "^1.0.5" } }, diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index a00b5c75be..8a35b5944d 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -6,7 +6,7 @@ "@codemirror/autocomplete": "^6.18.3", "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.6", - 
"@codemirror/lint": "^6.8.1", + "@codemirror/lint": "^6.8.4", "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.35.3", From 69102779e525006f5d46c28cebc06ea4c8a4a566 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 04:55:57 +0100 Subject: [PATCH 1126/1397] chore(deps): bump @prometheus-io/codemirror-promql in /web/ui/react-app (#15653) Bumps [@prometheus-io/codemirror-promql](https://github.com/prometheus/prometheus) from 0.55.0 to 0.300.1. - [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.55.0...v0.300.1) --- updated-dependencies: - dependency-name: "@prometheus-io/codemirror-promql" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 26 +++++++++++++------------- web/ui/react-app/package.json | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 3004c65391..284a55edf3 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -24,7 +24,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.55.0", + "@prometheus-io/codemirror-promql": "0.300.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.8", @@ -4653,12 +4653,12 @@ } }, "node_modules/@prometheus-io/codemirror-promql": { - "version": "0.55.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.55.0.tgz", - "integrity": 
"sha512-W+aBBToIvxHbcDsQYJSpgaMtcLUCy3SMIK6jluaEgJrkpOfEJnItZu/rvCC/ehCz2c+h+6WkPJklH8WogsXyEg==", + "version": "0.300.1", + "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.300.1.tgz", + "integrity": "sha512-sr8cDrPgNtZfeRz0s1HPgokkwELcND21rUkwlP7zYmu9U51HrXIB8DD1c9pCfDmq8ifa5WB2CAYXQYdLDCBNww==", "dependencies": { - "@prometheus-io/lezer-promql": "0.55.0", - "lru-cache": "^7.18.3" + "@prometheus-io/lezer-promql": "0.300.1", + "lru-cache": "^11.0.1" }, "engines": { "node": ">=12.0.0" @@ -4673,9 +4673,9 @@ } }, "node_modules/@prometheus-io/lezer-promql": { - "version": "0.55.0", - "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.55.0.tgz", - "integrity": "sha512-DHg6l6pfDnE8eLsj4DyXhFDe7OsqSBw2EnSVG4biddzLsIF5gXKazIswYTGHJ26CGHHiDPcbXjhlm9dEWI2aJA==", + "version": "0.300.1", + "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.300.1.tgz", + "integrity": "sha512-5Xw6AiRnFiat42p39djMwr9BGYUVa8MMElUSVGQ8+y9Okmkw09eVil7NPhzzFtIJ1BSZ6jZhlUNXJ/LbnYEJ5Q==", "peerDependencies": { "@lezer/highlight": "^1.1.2", "@lezer/lr": "^1.2.3" @@ -16055,11 +16055,11 @@ } }, "node_modules/lru-cache": { - "version": "7.18.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", - "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "version": "11.0.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.0.2.tgz", + "integrity": "sha512-123qHRfJBmo2jXDbo/a5YOQrJoHF/GNQTLzQ5+IdK5pWpceK17yRc6ozlWd25FxvGKQbIUs91fDFkXmDHTKcyA==", "engines": { - "node": ">=12" + "node": "20 || >=22" } }, "node_modules/magic-string": { diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 8a35b5944d..928ae33ee1 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -19,7 +19,7 @@ "@lezer/lr": "^1.4.2", "@nexucis/fuzzy": "^0.5.1", 
"@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.55.0", + "@prometheus-io/codemirror-promql": "0.300.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^9.0.8", From 3c6700bade37841768b642e1a038ec25abaee851 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 05:04:57 +0100 Subject: [PATCH 1127/1397] chore(deps): bump golang.org/x/tools from 0.27.0 to 0.28.0 (#15593) Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.27.0 to 0.28.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.27.0...v0.28.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 5e94985788..ee900756e8 100644 --- a/go.mod +++ b/go.mod @@ -75,10 +75,10 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.24.0 - golang.org/x/sync v0.9.0 - golang.org/x/sys v0.27.0 - golang.org/x/text v0.20.0 - golang.org/x/tools v0.27.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/text v0.21.0 + golang.org/x/tools v0.28.0 google.golang.org/api v0.209.0 google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 google.golang.org/grpc v1.68.1 @@ -187,11 +187,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.29.0 // indirect + golang.org/x/crypto v0.30.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.22.0 // indirect - 
golang.org/x/net v0.31.0 // indirect - golang.org/x/term v0.26.0 // indirect + golang.org/x/net v0.32.0 // indirect + golang.org/x/term v0.27.0 // indirect golang.org/x/time v0.8.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect diff --git a/go.sum b/go.sum index dc6e4f0427..2a40f9a9ac 100644 --- a/go.sum +++ b/go.sum @@ -541,8 +541,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= @@ -570,8 +570,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= 
-golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= @@ -583,8 +583,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -615,17 +615,17 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 
h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -638,8 +638,8 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From ceeab58e7d355ffa314671ff14957742298e499e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 04:06:30 +0000 Subject: [PATCH 1128/1397] chore(deps): bump github.com/prometheus/common from 0.60.1 to 0.61.0 Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.60.1 to 0.61.0. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.60.1...v0.61.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ee900756e8..6c15a6690b 100644 --- a/go.mod +++ b/go.mod @@ -52,7 +52,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.60.1 + github.com/prometheus/common v0.61.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/exporter-toolkit v0.13.1 github.com/prometheus/sigv4 v0.1.0 diff --git a/go.sum b/go.sum index 2a40f9a9ac..0a62fa626c 100644 --- a/go.sum +++ b/go.sum @@ -438,8 +438,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04= From d64d1c4c0a1a2c2fb4d07ba6e8244e36d827214a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 12 Dec 2024 10:59:06 +0100 Subject: [PATCH 1129/1397] Benchmark encoding classic and nhcb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 
György Krajcsovits --- tsdb/record/record_test.go | 140 +++++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index e26f964072..e64182c6fb 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -15,13 +15,16 @@ package record import ( + "fmt" "math/rand" "testing" "github.com/stretchr/testify/require" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/util/testutil" @@ -464,3 +467,140 @@ func TestRecord_MetadataDecodeUnknownExtraFields(t *testing.T) { require.NoError(t, err) require.Equal(t, expectedMetadata, decMetadata) } + +type recordsMaker struct { + name string + init func(int, int, int) +} + +// BenchmarkWAL_HistogramLog measures efficiency of encoding classic +// histograms and native historgrams with custom buckets (NHCB). +func BenchmarkWAL_HistogramEncoding(b *testing.B) { + // Cache for the refs. 
+ var series []RefSeries + var samples []RefSample + var nhcbs []RefHistogramSample + + resetCache := func() { + series = nil + samples = nil + nhcbs = nil + } + + initClassicRefs := func(labelCount, histograms, buckets int) { + ref := chunks.HeadSeriesRef(0) + lbls := map[string]string{} + for i := range labelCount { + lbls[fmt.Sprintf("l%d", i)] = fmt.Sprintf("v%d", i) + } + for i := range histograms { + lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_count", i) + series = append(series, RefSeries{ + Ref: ref, + Labels: labels.FromMap(lbls), + }) + samples = append(samples, RefSample{ + Ref: ref, + T: 100, + V: float64(i), + }) + ref++ + + lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_sum", i) + series = append(series, RefSeries{ + Ref: ref, + Labels: labels.FromMap(lbls), + }) + samples = append(samples, RefSample{ + Ref: ref, + T: 100, + V: float64(i), + }) + ref++ + + if buckets == 0 { + continue + } + lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_bucket", i) + for j := range buckets { + lbls[model.BucketLabel] = fmt.Sprintf("%d", j) + series = append(series, RefSeries{ + Ref: ref, + Labels: labels.FromMap(lbls), + }) + samples = append(samples, RefSample{ + Ref: ref, + T: 100, + V: float64(i + j), + }) + ref++ + } + delete(lbls, model.BucketLabel) + } + } + + initNHCBRefs := func(labelCount, histograms, buckets int) { + ref := chunks.HeadSeriesRef(0) + lbls := map[string]string{} + for i := range labelCount { + lbls[fmt.Sprintf("l%d", i)] = fmt.Sprintf("v%d", i) + } + for i := range histograms { + lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d", i) + series = append(series, RefSeries{ + Ref: ref, + Labels: labels.FromMap(lbls), + }) + h := &histogram.Histogram{ + Schema: histogram.CustomBucketsSchema, + Count: uint64(i), + Sum: float64(i), + PositiveSpans: []histogram.Span{{Length: uint32(buckets)}}, + PositiveBuckets: make([]int64, buckets+1), + CustomValues: make([]float64, buckets), + } + for j := range buckets { + 
h.PositiveBuckets[j] = int64(i + j) + } + nhcbs = append(nhcbs, RefHistogramSample{ + Ref: ref, + T: 100, + H: h, + }) + ref++ + } + } + + for _, maker := range []recordsMaker{ + { + name: "classic", + init: initClassicRefs, + }, + { + name: "nhcb", + init: initNHCBRefs, + }, + } { + for _, labelCount := range []int{0, 10, 50} { + for _, histograms := range []int{10, 100, 1000} { + for _, buckets := range []int{0, 1, 10, 100} { + b.Run(fmt.Sprintf("%s labels=%d histograms=%d buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) { + resetCache() + maker.init(labelCount, histograms, buckets) + enc := Encoder{} + for range b.N { + var buf []byte + enc.Series(series, buf) + enc.Samples(samples, buf) + var leftOver []RefHistogramSample + _, leftOver = enc.HistogramSamples(nhcbs, buf) + if len(leftOver) > 0 { + enc.CustomBucketsHistogramSamples(leftOver, buf) + } + } + }) + } + } + } + } +} From aa9fef75acbe261729008a8a1c45440e2f968f74 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Thu, 12 Dec 2024 10:06:16 +0000 Subject: [PATCH 1130/1397] Apply suggestions from code review Co-authored-by: Arthur Silva Sens Co-authored-by: Ayoub Mrini Signed-off-by: Bartlomiej Plotka --- config/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 551bbd5127..5d49287899 100644 --- a/config/config.go +++ b/config/config.go @@ -310,10 +310,10 @@ func (c Config) String() string { // the ones from the scrape_config_files. // This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency). // This method also assumes the Config was created by Load, due to races, -// read mode https://github.com/prometheus/prometheus/issues/15538. +// read more https://github.com/prometheus/prometheus/issues/15538. 
func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { if !c.loaded { - return nil, errors.New("main config scrape configs was not validated and loaded; GetScrapeConfigs method can only be used on configuration from the config.Load or config.LoadFile") + return nil, errors.New("config was not validated and loaded; GetScrapeConfigs method can only be used on configuration from the config.Load or config.LoadFile") } scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) From 134a0c7d7843775ec1300ef1b0031320ff891770 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Thu, 12 Dec 2024 10:57:27 +0000 Subject: [PATCH 1131/1397] Improved error. Signed-off-by: bwplotka --- config/config.go | 6 ++++-- config/config_test.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index 5d49287899..83a4f5860d 100644 --- a/config/config.go +++ b/config/config.go @@ -309,11 +309,13 @@ func (c Config) String() string { // GetScrapeConfigs returns the read-only, validated scrape configurations including // the ones from the scrape_config_files. // This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency). -// This method also assumes the Config was created by Load, due to races, +// This method also assumes the Config was created by Load or LoadFile function, it returns error +// if it was not. We can't re-validate or apply globals here due to races, // read more https://github.com/prometheus/prometheus/issues/15538. func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { if !c.loaded { - return nil, errors.New("config was not validated and loaded; GetScrapeConfigs method can only be used on configuration from the config.Load or config.LoadFile") + // Programmatic error, we warn before more confusing errors would happen due to lack of the globalization. 
+ return nil, errors.New("scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen") } scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) diff --git a/config/config_test.go b/config/config_test.go index 0b1feea8b1..437b858b00 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2557,7 +2557,7 @@ func TestGetScrapeConfigs_Loaded(t *testing.T) { t.Run("without load", func(t *testing.T) { c := &Config{} _, err := c.GetScrapeConfigs() - require.EqualError(t, err, "main config scrape configs was not validated and loaded; GetScrapeConfigs method can only be used on configuration from the config.Load or config.LoadFile") + require.EqualError(t, err, "scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen") }) t.Run("with load", func(t *testing.T) { c, err := Load("", promslog.NewNopLogger()) From fdb1516af1c671457bc88687370b95911d4915a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 12 Dec 2024 12:47:43 +0100 Subject: [PATCH 1132/1397] Fix lint errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/record/record_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index e64182c6fb..a035a45fc2 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -17,11 +17,13 @@ package record import ( "fmt" "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" @@ -523,7 +525,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { } lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_bucket", i) for j := range buckets { - lbls[model.BucketLabel] = 
fmt.Sprintf("%d", j) + lbls[model.BucketLabel] = fmt.Sprintf("%g", j) series = append(series, RefSeries{ Ref: ref, Labels: labels.FromMap(lbls), From cf36792e14736ecdc0b9f81635c18b54c331e306 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 12 Dec 2024 12:49:28 +0100 Subject: [PATCH 1133/1397] Fix unused import MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/record/record_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index a035a45fc2..7cab35bd43 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -17,7 +17,6 @@ package record import ( "fmt" "math/rand" - "strconv" "testing" "github.com/stretchr/testify/require" From df88de5800b57f615cdbf7010d02bc4cae4098e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 12 Dec 2024 12:52:01 +0100 Subject: [PATCH 1134/1397] Fix lint for real MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/record/record_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 7cab35bd43..6e9c6e483c 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -524,7 +524,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { } lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_bucket", i) for j := range buckets { - lbls[model.BucketLabel] = fmt.Sprintf("%g", j) + lbls[model.BucketLabel] = fmt.Sprintf("%d.0", j) series = append(series, RefSeries{ Ref: ref, Labels: labels.FromMap(lbls), From 7d0f6b7e6d72049f39dfa0dd36eaf941d92413ac Mon Sep 17 00:00:00 2001 From: Jeremiah Harbach Date: Thu, 12 Dec 2024 06:57:08 -0600 Subject: [PATCH 1135/1397] Add specific Azure permissions required for Azure SD (#15549) Add specific Azure permissions 
required for Azure SD Co-authored-by: Ayoub Mrini Signed-off-by: Jeremiah Harbach --------- Signed-off-by: Jeremiah Harbach Co-authored-by: Ayoub Mrini --- docs/configuration/configuration.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 746360633e..f76a8bfbb9 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -676,6 +676,13 @@ http_headers: Azure SD configurations allow retrieving scrape targets from Azure VMs. +The discovery requires at least the following permissions: + +* `Microsoft.Compute/virtualMachines/read`: Required for VM discovery +* `Microsoft.Network/networkInterfaces/read`: Required for VM discovery +* `Microsoft.Compute/virtualMachineScaleSets/virtualMachines/read`: Required for scale set (VMSS) discovery +* `Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/read`: Required for scale set (VMSS) discovery + The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_azure_machine_id`: the machine ID From ceb2f653ba550f1d3dae4ec03f786391ff96769e Mon Sep 17 00:00:00 2001 From: Anand Rajagopal Date: Fri, 13 Dec 2024 01:19:40 +0000 Subject: [PATCH 1136/1397] Add an option to restore new rule groups added to existing rule manager Signed-off-by: Anand Rajagopal --- rules/fixtures/alert_rule.yaml | 6 ++ rules/fixtures/alert_rule1.yaml | 6 ++ rules/manager.go | 32 ++++---- rules/manager_test.go | 133 ++++++++++++++++++++++++++++++++ 4 files changed, 164 insertions(+), 13 deletions(-) create mode 100644 rules/fixtures/alert_rule.yaml create mode 100644 rules/fixtures/alert_rule1.yaml diff --git a/rules/fixtures/alert_rule.yaml b/rules/fixtures/alert_rule.yaml new file mode 100644 index 0000000000..0b6d69dafe --- /dev/null +++ b/rules/fixtures/alert_rule.yaml @@ -0,0 +1,6 @@ +groups: + - name: test + interval: 1s + rules: + - alert: rule1 + expr: 1 < bool 2 diff 
--git a/rules/fixtures/alert_rule1.yaml b/rules/fixtures/alert_rule1.yaml new file mode 100644 index 0000000000..306ff41b6e --- /dev/null +++ b/rules/fixtures/alert_rule1.yaml @@ -0,0 +1,6 @@ +groups: + - name: test2 + interval: 1s + rules: + - alert: rule2 + expr: 1 < bool 2 diff --git a/rules/manager.go b/rules/manager.go index 6e9bf64691..9bdb66f1b0 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -89,12 +89,13 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time. // The Manager manages recording and alerting rules. type Manager struct { - opts *ManagerOptions - groups map[string]*Group - mtx sync.RWMutex - block chan struct{} - done chan struct{} - restored bool + opts *ManagerOptions + groups map[string]*Group + mtx sync.RWMutex + block chan struct{} + done chan struct{} + restored bool + restoreNewRuleGroups bool logger *slog.Logger } @@ -121,6 +122,10 @@ type ManagerOptions struct { ConcurrentEvalsEnabled bool RuleConcurrencyController RuleConcurrencyController RuleDependencyController RuleDependencyController + // At present, manager only restores `for` state when manager is newly created which happens + // during restarts. 
This flag provides an option to restore the `for` state when new rule groups are + // added to an existing manager + RestoreNewRuleGroups bool Metrics *Metrics } @@ -153,11 +158,12 @@ func NewManager(o *ManagerOptions) *Manager { } m := &Manager{ - groups: map[string]*Group{}, - opts: o, - block: make(chan struct{}), - done: make(chan struct{}), - logger: o.Logger, + groups: map[string]*Group{}, + opts: o, + block: make(chan struct{}), + done: make(chan struct{}), + logger: o.Logger, + restoreNewRuleGroups: o.RestoreNewRuleGroups, } return m @@ -295,7 +301,7 @@ func (m *Manager) LoadGroups( ) (map[string]*Group, []error) { groups := make(map[string]*Group) - shouldRestore := !m.restored + shouldRestore := !m.restored || m.restoreNewRuleGroups for _, fn := range filenames { rgs, errs := m.opts.GroupLoader.Load(fn) @@ -328,7 +334,7 @@ func (m *Manager) LoadGroups( labels.FromMap(r.Annotations), externalLabels, externalURL, - m.restored, + !shouldRestore, m.logger.With("alert", r.Alert), )) continue diff --git a/rules/manager_test.go b/rules/manager_test.go index 6afac993d8..bbc0a6023e 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -2112,6 +2112,139 @@ func TestAsyncRuleEvaluation(t *testing.T) { }) } +func TestNewRuleGroupRestoration(t *testing.T) { + store := teststorage.New(t) + t.Cleanup(func() { store.Close() }) + var ( + inflightQueries atomic.Int32 + maxInflight atomic.Int32 + maxConcurrency int64 + interval = 60 * time.Second + ) + + waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) { + for { + select { + case cnt := <-ch: + if cnt == targetCount { + return + } + case <-time.After(5 * time.Second): + return + } + } + } + + files := []string{"fixtures/alert_rule.yaml"} + + option := optsFactory(store, &maxInflight, &inflightQueries, maxConcurrency) + option.Queryable = store + option.Appendable = store + option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {} + + var evalCount atomic.Int32 + 
ch := make(chan int32) + noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { + evalCount.Inc() + ch <- evalCount.Load() + } + + ruleManager := NewManager(option) + go ruleManager.Run() + err := ruleManager.Update(interval, files, labels.EmptyLabels(), "", noopEvalIterFunc) + require.NoError(t, err) + + waitForEvaluations(t, ch, 3) + require.Equal(t, int32(3), evalCount.Load()) + ruleGroups := make(map[string]struct{}) + for _, group := range ruleManager.groups { + ruleGroups[group.Name()] = struct{}{} + require.False(t, group.shouldRestore) + for _, rule := range group.rules { + require.True(t, rule.(*AlertingRule).restored.Load()) + } + } + + files = append(files, "fixtures/alert_rule1.yaml") + err = ruleManager.Update(interval, files, labels.EmptyLabels(), "", nil) + require.NoError(t, err) + ruleManager.Stop() + for _, group := range ruleManager.groups { + // new rule groups added to existing manager will not be restored + require.False(t, group.shouldRestore) + } +} + +func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) { + store := teststorage.New(t) + t.Cleanup(func() { store.Close() }) + var ( + inflightQueries atomic.Int32 + maxInflight atomic.Int32 + maxConcurrency int64 + interval = 60 * time.Second + ) + + waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) { + for { + select { + case cnt := <-ch: + if cnt == targetCount { + return + } + case <-time.After(5 * time.Second): + return + } + } + } + + files := []string{"fixtures/alert_rule.yaml"} + + option := optsFactory(store, &maxInflight, &inflightQueries, maxConcurrency) + option.Queryable = store + option.Appendable = store + option.RestoreNewRuleGroups = true + option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {} + + var evalCount atomic.Int32 + ch := make(chan int32) + noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { + evalCount.Inc() + ch <- evalCount.Load() + } + 
+ ruleManager := NewManager(option) + go ruleManager.Run() + err := ruleManager.Update(interval, files, labels.EmptyLabels(), "", noopEvalIterFunc) + require.NoError(t, err) + + waitForEvaluations(t, ch, 3) + require.Equal(t, int32(3), evalCount.Load()) + ruleGroups := make(map[string]struct{}) + for _, group := range ruleManager.groups { + ruleGroups[group.Name()] = struct{}{} + require.False(t, group.shouldRestore) + for _, rule := range group.rules { + require.True(t, rule.(*AlertingRule).restored.Load()) + } + } + + files = append(files, "fixtures/alert_rule1.yaml") + err = ruleManager.Update(interval, files, labels.EmptyLabels(), "", nil) + require.NoError(t, err) + // stop eval + ruleManager.Stop() + for _, group := range ruleManager.groups { + if _, OK := ruleGroups[group.Name()]; OK { + // already restored + require.False(t, group.shouldRestore) + continue + } + // new rule groups added to existing manager will be restored + require.True(t, group.shouldRestore) + } +} + func TestBoundedRuleEvalConcurrency(t *testing.T) { storage := teststorage.New(t) t.Cleanup(func() { storage.Close() }) From 9009724c5b68d917b68b6c8801893ec2dc21522f Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Fri, 13 Dec 2024 18:04:55 +0530 Subject: [PATCH 1137/1397] [BUGFIX] PromQL: Adds test for `absent`, `absent_over_time` and `deriv` func with histograms (#15667) Add test for `absent`, `absent_over_time` and `deriv` func with histograms Signed-off-by: Neeraj Gartia --------- Signed-off-by: Neeraj Gartia --- promql/promqltest/testdata/functions.test | 29 +++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 2ed7ffb6a4..a00ed8a3ea 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -256,6 +256,9 @@ clear load 5m testcounter_reset_middle_total 0+10x4 0+10x5 
http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 + testcounter_reset_middle_mix 0+10x4 0+10x5 {{schema:0 sum:1 count:1}} {{schema:1 sum:2 count:2}} + http_requests_mix{job="app-server", instance="1", group="canary"} 0+80x10 {{schema:0 sum:1 count:1}} + http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 # deriv should return the same as rate in simple cases. eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) @@ -268,6 +271,16 @@ eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job= eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 +# deriv should ignore histograms. +eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) + {group="canary", instance="1", job="app-server"} 0.26666666666666666 + +eval instant at 100m deriv(testcounter_reset_middle_mix[110m]) + {} 0.010606060606060607 + +eval instant at 50m deriv(http_requests_histogram[60m]) + #empty + # predict_linear should return correct result. # X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000] # Y = [ 0, 10, 20, 30, 40, 0, 10, 20, 30, 40, 50] @@ -1110,11 +1123,16 @@ clear # Don't return anything when there's something there. 
load 5m http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests_histogram{job="api-server", instance="0", group="production"} {{schema:0 sum:1 count:1}}x11 eval instant at 50m absent(http_requests) eval instant at 50m absent(sum(http_requests)) +eval instant at 50m absent(http_requests_histogram) + +eval instant at 50m absent(sum(http_requests_histogram)) + clear eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"})) @@ -1162,6 +1180,7 @@ load 1m httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15 httpd_log_lines_total{instance="127.0.0.1",job="node"} 1 ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN + http_requests_histogram{path="/foo",instance="127.0.0.1",job="httpd"} {{schema:0 sum:1 count:1}}x11 eval instant at 5m absent_over_time(http_requests_total[5m]) @@ -1205,6 +1224,16 @@ eval instant at 5m absent_over_time({job="ingress"}[4m]) eval instant at 10m absent_over_time({job="ingress"}[4m]) {job="ingress"} 1 +eval instant at 10m absent_over_time(http_requests_histogram[5m]) + +eval instant at 10m absent_over_time(rate(http_requests_histogram[5m])[5m:1m]) + +eval instant at 20m absent_over_time(http_requests_histogram[5m]) + {} 1 + +eval instant at 20m absent_over_time(rate(http_requests_histogram[5m])[5m:1m]) + {} 1 + clear # Testdata for present_over_time() From afde4707c52ccce477c5dd10cdd2af05c641369c Mon Sep 17 00:00:00 2001 From: Jan Horstmann Date: Mon, 4 Nov 2024 09:06:52 +0100 Subject: [PATCH 1138/1397] Update mixin dashboard Update and rewrite the mixin dashboards to use the grafonnet ([1]) library. Grafana has deprecated angular plugins ([2]) as used by grafonnet-lib ([3]) with removal pending for grafana version 12. Additionally grafonnet-lib is deprecated/unmaintained in favor of grafonnet. Therefore the mixin dashboards have been updated to use grafonnet. 
Closes: https://github.com/prometheus/prometheus/issues/14404 [1] https://github.com/grafana/grafonnet [2] https://grafana.com/docs/grafana/latest/developers/angular_deprecation/ [3] https://github.com/grafana/grafonnet-lib Signed-off-by: Jan Horstmann --- .../prometheus-mixin/dashboards.libsonnet | 1185 +++++++++++------ .../prometheus-mixin/jsonnetfile.json | 15 +- 2 files changed, 789 insertions(+), 411 deletions(-) diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index 2bdd168cc9..22b8c92e6e 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -1,438 +1,825 @@ -local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; -local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet'; +local grafana = import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet'; local dashboard = grafana.dashboard; -local row = grafana.row; -local singlestat = grafana.singlestat; -local prometheus = grafana.prometheus; -local graphPanel = grafana.graphPanel; -local tablePanel = grafana.tablePanel; -local template = grafana.template; +local prometheus = grafana.query.prometheus; +local variable = dashboard.variable; +local panel = grafana.panel; +local row = panel.row; + { grafanaDashboards+:: { + + local panelTimeSeriesStdOptions = + {} + + panel.timeSeries.queryOptions.withDatasource('prometheus', '$datasource') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(10) + + panel.timeSeries.fieldConfig.defaults.custom.withShowPoints('never') + + panel.timeSeries.options.tooltip.withMode('multi') + , + + local panelTimeSeriesStacking = + {} + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(100) + + panel.timeSeries.fieldConfig.defaults.custom.withLineWidth(0) + + panel.timeSeries.fieldConfig.defaults.custom.stacking.withMode('normal') + , + 
'prometheus.json': + local showMultiCluster = $._config.showMultiCluster; - local dashboard = g.dashboard( - '%(prefix)sOverview' % $._config.grafanaPrometheus - ); - local templatedDashboard = if showMultiCluster then - dashboard - .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel) - .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job') - .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance') - else - dashboard - .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job') - .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance'); - templatedDashboard - .addRow( - g.row('Prometheus Stats') - .addPanel( - g.panel('Prometheus Stats') + - g.tablePanel(if showMultiCluster then [ - 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', - 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})', - ] else [ - 'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})', - 'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})', - ], { - cluster: { alias: if showMultiCluster then 'Cluster' else '' }, - job: { alias: 'Job' }, - instance: { alias: 'Instance' }, - version: { alias: 'Version' }, - 'Value #A': { alias: 'Count', type: 'hidden' }, - 'Value #B': { alias: 'Uptime', type: 'number', unit: 's' }, - }) - ) - ) - .addRow( - g.row('Discovery') - .addPanel( - g.panel('Target Sync') + - g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3' - else 
'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3', - if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}' - else '{{scrape_job}}') + - { yaxes: g.yaxes('ms') } - ) - .addPanel( - g.panel('Targets') + - g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})' - else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})', - if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}' - else 'Targets') + - g.stack - ) - ) - .addRow( - g.row('Retrieval') - .addPanel( - g.panel('Average Scrape Interval Duration') + - g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' - else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3', - if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured' - else '{{interval}} configured') + - { yaxes: g.yaxes('ms') } - ) - .addPanel( - g.panel('Scrape failures') + - g.queryPanel(if showMultiCluster then [ - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) 
(rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))', - ] else [ - 'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))', - 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))', - ], if showMultiCluster then [ - 'exceeded body size limit: {{cluster}} {{job}} {{instance}}', - 'exceeded sample limit: {{cluster}} {{job}} {{instance}}', - 'duplicate timestamp: {{cluster}} {{job}} {{instance}}', - 'out of bounds: {{cluster}} {{job}} {{instance}}', - 'out of order: {{cluster}} {{job}} {{instance}}', - ] else [ - 'exceeded body size limit: {{job}}', - 'exceeded sample limit: {{job}}', - 'duplicate timestamp: {{job}}', - 'out of bounds: {{job}}', - 'out of order: {{job}}', - ]) + - g.stack - ) - .addPanel( - g.panel('Appended Samples') + - g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' - else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])', - if showMultiCluster then '{{cluster}} {{job}} {{instance}}' - else '{{job}} {{instance}}') + - g.stack - ) - ) - .addRow( - g.row('Storage') - .addPanel( - g.panel('Head Series') + - g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' - else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}', - if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series' - else '{{job}} 
{{instance}} head series') + - g.stack - ) - .addPanel( - g.panel('Head Chunks') + - g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' - else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}', - if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks' - else '{{job}} {{instance}} head chunks') + - g.stack - ) - ) - .addRow( - g.row('Query') - .addPanel( - g.panel('Query Rate') + - g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' - else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', - if showMultiCluster then '{{cluster}} {{job}} {{instance}}' - else '{{job}} {{instance}}') + - g.stack, - ) - .addPanel( - g.panel('Stage Duration') + - g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' - else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3', - if showMultiCluster then '{{slice}}' - else '{{slice}}') + - { yaxes: g.yaxes('ms') } + - g.stack, - ) - ) + { - tags: $._config.grafanaPrometheus.tags, - refresh: $._config.grafanaPrometheus.refresh, - }, + + local datasourceVariable = + variable.datasource.new('datasource', 'prometheus') + + variable.datasource.generalOptions.withLabel('Data source') + + variable.datasource.generalOptions.withCurrent('default') + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local clusterVariable = + variable.query.new('cluster') + + variable.query.generalOptions.withLabel('cluster') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.withSort(type='alphabetical', 
asc=false) + + variable.query.selectionOptions.withIncludeAll(true, '.+') + + variable.query.selectionOptions.withMulti(true) + + variable.query.generalOptions.withCurrent('$__all') + + variable.query.queryTypes.withLabelValues($._config.clusterLabel, metric='prometheus_build_info{%(prometheusSelector)s}' % $._config) + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local jobVariable = + variable.query.new('job') + + variable.query.generalOptions.withLabel('job') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.withSort(type='alphabetical', asc=false) + + variable.query.selectionOptions.withIncludeAll(true, '.+') + + variable.query.selectionOptions.withMulti(true) + + if showMultiCluster then + variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{cluster=~"$cluster"}') + else + variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(prometheusSelector)s}' % $._config) + ; + + local instanceVariable = + variable.query.new('instance') + + variable.query.generalOptions.withLabel('instance') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.withSort(type='alphabetical', asc=false) + + variable.query.selectionOptions.withIncludeAll(true, '.+') + + variable.query.selectionOptions.withMulti(true) + + if showMultiCluster then + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster", job=~"$job"}') + else + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{job=~"$job"}') + ; + + local prometheusStats = + panel.table.new('Prometheus Stats') + + panel.table.queryOptions.withDatasource('prometheus', '$datasource') + + panel.table.standardOptions.withUnit('short') + + panel.table.standardOptions.withDecimals(2) + + panel.table.standardOptions.withDisplayName('') + + 
panel.table.standardOptions.withOverrides([ + panel.table.standardOptions.override.byName.new('Time') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Time') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('custom.hidden', 'true'), + panel.table.standardOptions.override.byName.new('cluster') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2) + + if showMultiCluster then panel.table.standardOptions.override.byName.withProperty('displayName', 'Cluster') else {}, + panel.table.standardOptions.override.byName.new('job') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2) + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Job'), + panel.table.standardOptions.override.byName.new('instance') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Instance') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2), + panel.table.standardOptions.override.byName.new('version') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Version') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2), + panel.table.standardOptions.override.byName.new('Value #A') + + 
panel.table.standardOptions.override.byName.withProperty('displayName', 'Count') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 'short') + + panel.table.standardOptions.override.byName.withProperty('decimals', 2) + + panel.table.standardOptions.override.byName.withProperty('custom.hidden', 'true'), + panel.table.standardOptions.override.byName.new('Value #B') + + panel.table.standardOptions.override.byName.withProperty('displayName', 'Uptime') + + panel.table.standardOptions.override.byName.withProperty('custom.align', null) + + panel.table.standardOptions.override.byName.withProperty('unit', 's'), + ]) + + if showMultiCluster then + panel.table.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})' + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + prometheus.new( + '$datasource', + 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})' + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + ]) + else + panel.table.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})' + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + prometheus.new( + '$datasource', + 'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})' + ) + + prometheus.withFormat('table') + + prometheus.withInstant(true) + + prometheus.withLegendFormat(''), + ]) + ; + + local targetSync = + panel.timeSeries.new('Target Sync') + + panelTimeSeriesStdOptions + + 
panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{scrape_job}}'), + ]) + ; + + local targets = + panel.timeSeries.new('Targets') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('Targets'), + ]) + ; + + local averageScrapeIntervalDuration = + panel.timeSeries.new('Average Scrape Interval Duration') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + 
panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}} {{interval}} configured'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{interval}} configured'), + ]) + ; + + local scrapeFailures = + panel.timeSeries.new('Scrape failures') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded body size limit: {{cluster}} {{job}} {{instance}}'), + prometheus.new( + '$datasource', + 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded sample limit: {{cluster}} {{job}} {{instance}}'), + prometheus.new( + '$datasource', + 'sum 
by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('duplicate timestamp: {{cluster}} {{job}} {{instance}}'), + prometheus.new( + '$datasource', + 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of bounds: {{cluster}} {{job}} {{instance}}'), + prometheus.new( + '$datasource', + 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of order: {{cluster}} {{job}} {{instance}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded body size limit: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('exceeded sample limit: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('duplicate timestamp: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of bounds: {{job}}'), + prometheus.new( + '$datasource', + 'sum by (job) 
(rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('out of order: {{job}}'), + ]) + ; + + local appendedSamples = + panel.timeSeries.new('Appended Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}}'), + ]) + ; + + local headSeries = + panel.timeSeries.new('Head Series') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head series'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}} head series'), + ]) + ; + + local 
headChunks = + panel.timeSeries.new('Head Chunks') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head chunks'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}} head chunks'), + ]) + ; + + local queryRate = + panel.timeSeries.new('Query Rate') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('short') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{job}} {{instance}}'), + ]) + ; + + local stageDuration = + panel.timeSeries.new('Stage Duration') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withSort('desc') + + 
panel.timeSeries.standardOptions.withMin(0) + + panelTimeSeriesStacking + + panel.timeSeries.standardOptions.withUnit('ms') + + if showMultiCluster then + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{slice}}'), + ]) + else + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3' + ) + + prometheus.withFormat('time_series') + + prometheus.withLegendFormat('{{slice}}'), + ]) + ; + + dashboard.new('%(prefix)sOverview' % $._config.grafanaPrometheus) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags($._config.grafanaPrometheus.tags) + + dashboard.timepicker.withRefreshIntervals($._config.grafanaPrometheus.refresh) + + dashboard.withVariables(std.prune([ + datasourceVariable, + if showMultiCluster then clusterVariable, + jobVariable, + instanceVariable, + ])) + + dashboard.withPanels( + grafana.util.grid.makeGrid([ + row.new('Prometheus Stats') + + row.withPanels([ + prometheusStats, + ]), + ], panelWidth=24, panelHeight=7) + + + grafana.util.grid.makeGrid([ + row.new('Discovery') + + row.withPanels([ + targetSync, + targets, + ]), + ], panelWidth=12, panelHeight=7, startY=8) + + + grafana.util.grid.makeGrid([ + row.new('Retrieval') + + row.withPanels([ + averageScrapeIntervalDuration, + scrapeFailures, + appendedSamples, + ]), + ], panelWidth=8, panelHeight=7, startY=16) + + + grafana.util.grid.makeGrid([ + row.new('Storage') + + row.withPanels([ + headSeries, + headChunks, + ]), + row.new('Query') + + row.withPanels([ + queryRate, + stageDuration, + ]), + ], panelWidth=12, panelHeight=7, startY=24) + ), // Remote write specific dashboard. 
'prometheus-remote-write.json': + + local datasourceVariable = + variable.datasource.new('datasource', 'prometheus') + + variable.datasource.generalOptions.withCurrent('default') + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local clusterVariable = + variable.query.new('cluster') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(true) + + variable.query.generalOptions.withCurrent('$__all') + + variable.query.queryTypes.withLabelValues($._config.clusterLabel, metric='prometheus_build_info') + + variable.datasource.generalOptions.showOnDashboard.withLabelAndValue() + ; + + local instanceVariable = + variable.query.new('instance') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(true) + + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster"}') + ; + + local urlVariable = + variable.query.new('url') + + variable.query.withDatasourceFromVariable(datasourceVariable) + + variable.query.refresh.onTime() + + variable.query.selectionOptions.withIncludeAll(true) + + variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}') + ; + local timestampComparison = - graphPanel.new( - 'Highest Timestamp In vs. 
Highest Timestamp Sent', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - ||| - ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} - - - ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0) - ) - |||, - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}', - )); + panel.timeSeries.new('Highest Timestamp In vs. Highest Timestamp Sent') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + ( + prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} + - + ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0) + ) + ||| + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local timestampComparisonRate = - graphPanel.new( - 'Rate[5m]', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - ||| - clamp_min( - rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) - - - ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) - , 0) - |||, - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}', - )); + panel.timeSeries.new('Rate[5m]') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + clamp_min( + 
rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) + - + ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) + , 0) + ||| + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local samplesRate = - graphPanel.new( - 'Rate, in vs. succeeded or dropped [5m]', - datasource='$datasource', - span=12, - ) - .addTarget(prometheus.target( - ||| - rate( - prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) - - - ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - - - (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - |||, - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Rate, in vs. 
succeeded or dropped [5m]') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + ||| + rate( + prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) + - + ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + - + (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + ||| + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local currentShards = - graphPanel.new( - 'Current Shards', - datasource='$datasource', - span=12, - min_span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Current Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local maxShards = - graphPanel.new( - 'Max Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_max{cluster=~"$cluster", 
instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Max Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local minShards = - graphPanel.new( - 'Min Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Min Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local desiredShards = - graphPanel.new( - 'Desired Shards', - datasource='$datasource', - span=4, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Desired Shards') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + ) + + 
prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local shardsCapacity = - graphPanel.new( - 'Shard Capacity', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); - + panel.timeSeries.new('Shard Capacity') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local pendingSamples = - graphPanel.new( - 'Pending Samples', - datasource='$datasource', - span=6, - ) - .addTarget(prometheus.target( - 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Pending Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.standardOptions.withUnit('short') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local walSegment = - graphPanel.new( 
- 'TSDB Current Segment', - datasource='$datasource', - span=6, - formatY1='none', - ) - .addTarget(prometheus.target( - 'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}', - legendFormat='{{cluster}}:{{instance}}' - )); + panel.timeSeries.new('TSDB Current Segment') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.standardOptions.withUnit('none') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}}'), + ]); local queueSegment = - graphPanel.new( - 'Remote Write Current Segment', - datasource='$datasource', - span=6, - formatY1='none', - ) - .addTarget(prometheus.target( - 'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}', - legendFormat='{{cluster}}:{{instance}} {{consumer}}' - )); + panel.timeSeries.new('Remote Write Current Segment') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.standardOptions.withUnit('none') + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{consumer}}'), + ]); local droppedSamples = - graphPanel.new( - 'Dropped Samples', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or 
rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Dropped Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local failedSamples = - graphPanel.new( - 'Failed Samples', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Failed Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), 
+ ]); local retriedSamples = - graphPanel.new( - 'Retried Samples', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Retried Samples') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); local enqueueRetries = - graphPanel.new( - 'Enqueue Retries', - datasource='$datasource', - span=3, - ) - .addTarget(prometheus.target( - 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])', - legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}' - )); + panel.timeSeries.new('Enqueue Retries') + + panelTimeSeriesStdOptions + + panel.timeSeries.options.tooltip.withMode('single') + + panel.timeSeries.fieldConfig.defaults.custom.withFillOpacity(0) + + panel.timeSeries.queryOptions.withTargets([ + prometheus.new( + '$datasource', + 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + ) + + prometheus.withFormat('time_series') + + prometheus.withIntervalFactor(2) + + 
prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + ]); - dashboard.new( - title='%(prefix)sRemote Write' % $._config.grafanaPrometheus, - editable=true - ) - .addTemplate( - { - hide: 0, - label: null, - name: 'datasource', - options: [], - query: 'prometheus', - refresh: 1, - regex: '', - type: 'datasource', - }, - ) - .addTemplate( - template.new( - 'cluster', - '$datasource', - 'label_values(prometheus_build_info, cluster)' % $._config, - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ) - ) - .addTemplate( - template.new( - 'instance', - '$datasource', - 'label_values(prometheus_build_info{cluster=~"$cluster"}, instance)' % $._config, - refresh='time', - current={ - selected: true, - text: 'All', - value: '$__all', - }, - includeAll=true, - ) - ) - .addTemplate( - template.new( - 'url', - '$datasource', - 'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}, url)' % $._config, - refresh='time', - includeAll=true, - ) - ) - .addRow( - row.new('Timestamps') - .addPanel(timestampComparison) - .addPanel(timestampComparisonRate) - ) - .addRow( - row.new('Samples') - .addPanel(samplesRate) - ) - .addRow( - row.new( - 'Shards' - ) - .addPanel(currentShards) - .addPanel(maxShards) - .addPanel(minShards) - .addPanel(desiredShards) - ) - .addRow( - row.new('Shard Details') - .addPanel(shardsCapacity) - .addPanel(pendingSamples) - ) - .addRow( - row.new('Segments') - .addPanel(walSegment) - .addPanel(queueSegment) - ) - .addRow( - row.new('Misc. 
Rates') - .addPanel(droppedSamples) - .addPanel(failedSamples) - .addPanel(retriedSamples) - .addPanel(enqueueRetries) - ) + { - tags: $._config.grafanaPrometheus.tags, - refresh: $._config.grafanaPrometheus.refresh, - }, + dashboard.new('%(prefix)sRemote Write' % $._config.grafanaPrometheus) + + dashboard.time.withFrom('now-1h') + + dashboard.withTags($._config.grafanaPrometheus.tags) + + dashboard.timepicker.withRefreshIntervals($._config.grafanaPrometheus.refresh) + + dashboard.withVariables([ + datasourceVariable, + clusterVariable, + instanceVariable, + urlVariable, + ]) + + dashboard.withPanels( + grafana.util.grid.makeGrid([ + row.new('Timestamps') + + row.withPanels([ + timestampComparison, + timestampComparisonRate, + ]), + ], panelWidth=12, panelHeight=7) + + + grafana.util.grid.makeGrid([ + row.new('Samples') + + row.withPanels([ + samplesRate + + panel.timeSeries.gridPos.withW(24), + ]), + row.new('Shards'), + ], panelWidth=24, panelHeight=7, startY=8) + + + grafana.util.grid.wrapPanels([ + currentShards + + panel.timeSeries.gridPos.withW(24), + maxShards, + minShards, + desiredShards, + ], panelWidth=8, panelHeight=7, startY=16) + + + grafana.util.grid.makeGrid([ + row.new('Shard Details') + + row.withPanels([ + shardsCapacity, + pendingSamples, + ]), + row.new('Segments') + + row.withPanels([ + walSegment, + queueSegment, + ]), + ], panelWidth=12, panelHeight=7, startY=24) + + + grafana.util.grid.makeGrid([ + row.new('Misc. 
Rates') + + row.withPanels([ + droppedSamples, + failedSamples, + retriedSamples, + enqueueRetries, + ]), + ], panelWidth=6, panelHeight=7, startY=40) + ), }, } diff --git a/documentation/prometheus-mixin/jsonnetfile.json b/documentation/prometheus-mixin/jsonnetfile.json index 1c64fd0151..2d56d91245 100644 --- a/documentation/prometheus-mixin/jsonnetfile.json +++ b/documentation/prometheus-mixin/jsonnetfile.json @@ -4,20 +4,11 @@ { "source": { "git": { - "remote": "https://github.com/grafana/grafonnet-lib.git", - "subdir": "grafonnet" + "remote": "https://github.com/grafana/grafonnet.git", + "subdir": "gen/grafonnet-latest" } }, - "version": "master" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "grafana-builder" - } - }, - "version": "master" + "version": "main" } ], "legacyImports": false From 7802ca263d3a4027762352f425e0fc6d18334560 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Fri, 13 Dec 2024 11:48:29 -0500 Subject: [PATCH 1139/1397] RuleDependencyController: Fix for indeterminate conditions (#15560) The dependency map being empty meant that all the rules were being set as having no dependencies or dependents. 
Which is the opposite of what we want Added two new tests that verify the behavior, they failed before the fix, running all the rules concurrently Signed-off-by: Julien Duchesne --- rules/fixtures/rules_indeterminates.yaml | 18 +++ rules/manager.go | 5 + rules/manager_test.go | 168 +++++++++++++++++++++++ 3 files changed, 191 insertions(+) create mode 100644 rules/fixtures/rules_indeterminates.yaml diff --git a/rules/fixtures/rules_indeterminates.yaml b/rules/fixtures/rules_indeterminates.yaml new file mode 100644 index 0000000000..a906d3b504 --- /dev/null +++ b/rules/fixtures/rules_indeterminates.yaml @@ -0,0 +1,18 @@ +groups: + - name: indeterminate + rules: + # This shouldn't run in parallel because of the open matcher + - record: job:http_requests:rate1m + expr: sum by (job)(rate(http_requests_total[1m])) + - record: job:http_requests:rate5m + expr: sum by (job)(rate(http_requests_total[5m])) + - record: job:http_requests:rate15m + expr: sum by (job)(rate(http_requests_total[15m])) + - record: job:http_requests:rate30m + expr: sum by (job)(rate(http_requests_total[30m])) + - record: job:http_requests:rate1h + expr: sum by (job)(rate(http_requests_total[1h])) + - record: job:http_requests:rate2h + expr: sum by (job)(rate(http_requests_total[2h])) + - record: matcher + expr: '{job="job1"}' diff --git a/rules/manager.go b/rules/manager.go index 6e9bf64691..df3d48700a 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -453,6 +453,11 @@ type ruleDependencyController struct{} // AnalyseRules implements RuleDependencyController. 
func (c ruleDependencyController) AnalyseRules(rules []Rule) { depMap := buildDependencyMap(rules) + + if depMap == nil { + return + } + for _, r := range rules { r.SetNoDependentRules(depMap.dependents(r) == 0) r.SetNoDependencyRules(depMap.dependencies(r) == 0) diff --git a/rules/manager_test.go b/rules/manager_test.go index 6afac993d8..327addcbc1 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -2110,6 +2110,45 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) } }) + + t.Run("asynchronous evaluation of independent rules, with indeterminate. Should be synchronous", func(t *testing.T) { + t.Parallel() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + inflightQueries := atomic.Int32{} + maxInflight := atomic.Int32{} + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ruleCount := 7 + opts := optsFactory(storage, &maxInflight, &inflightQueries, 0) + + // Configure concurrency settings. + opts.ConcurrentEvalsEnabled = true + opts.MaxConcurrentEvals = int64(ruleCount) * 2 + opts.RuleConcurrencyController = nil + ruleManager := NewManager(opts) + + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_indeterminates.yaml"}...) + require.Empty(t, errs) + require.Len(t, groups, 1) + + for _, group := range groups { + require.Len(t, group.rules, ruleCount) + + start := time.Now() + + group.Eval(ctx, start) + + // Never expect more than 1 inflight query at a time. + require.EqualValues(t, 1, maxInflight.Load()) + // Each rule should take at least 1 second to execute sequentially. + require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds()) + // Each rule produces one vector. 
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) + } + }) } func TestBoundedRuleEvalConcurrency(t *testing.T) { @@ -2222,3 +2261,132 @@ func TestLabels_FromMaps(t *testing.T) { require.Equal(t, expected, mLabels, "unexpected labelset") } + +func TestRuleDependencyController_AnalyseRules(t *testing.T) { + type expectedDependencies struct { + noDependentRules bool + noDependencyRules bool + } + + testCases := []struct { + name string + ruleFile string + expected map[string]expectedDependencies + }{ + { + name: "all independent rules", + ruleFile: "fixtures/rules_multiple_independent.yaml", + expected: map[string]expectedDependencies{ + "job:http_requests:rate1m": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate5m": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate15m": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate30m": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate1h": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate2h": { + noDependentRules: true, + noDependencyRules: true, + }, + }, + }, + { + name: "some dependent rules", + ruleFile: "fixtures/rules_multiple.yaml", + expected: map[string]expectedDependencies{ + "job:http_requests:rate1m": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate5m": { + noDependentRules: true, + noDependencyRules: true, + }, + "job:http_requests:rate15m": { + noDependentRules: false, + noDependencyRules: true, + }, + "TooManyRequests": { + noDependentRules: true, + noDependencyRules: false, + }, + }, + }, + { + name: "indeterminate rules", + ruleFile: "fixtures/rules_indeterminates.yaml", + expected: map[string]expectedDependencies{ + "job:http_requests:rate1m": { + noDependentRules: false, + noDependencyRules: false, + }, + "job:http_requests:rate5m": { + noDependentRules: false, + 
noDependencyRules: false, + }, + "job:http_requests:rate15m": { + noDependentRules: false, + noDependencyRules: false, + }, + "job:http_requests:rate30m": { + noDependentRules: false, + noDependencyRules: false, + }, + "job:http_requests:rate1h": { + noDependentRules: false, + noDependencyRules: false, + }, + "job:http_requests:rate2h": { + noDependentRules: false, + noDependencyRules: false, + }, + "matcher": { + noDependentRules: false, + noDependencyRules: false, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + ruleManager := NewManager(&ManagerOptions{ + Context: context.Background(), + Logger: promslog.NewNopLogger(), + Appendable: storage, + QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, + }) + + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, tc.ruleFile) + require.Empty(t, errs) + require.Len(t, groups, 1) + + for _, g := range groups { + ruleManager.opts.RuleDependencyController.AnalyseRules(g.rules) + + for _, r := range g.rules { + exp, ok := tc.expected[r.Name()] + require.Truef(t, ok, "rule not found in expected: %s", r.String()) + require.Equalf(t, exp.noDependentRules, r.NoDependentRules(), "rule: %s", r.String()) + require.Equalf(t, exp.noDependencyRules, r.NoDependencyRules(), "rule: %s", r.String()) + } + } + }) + } +} From 953a8733428455c7a9fb752cc903ed4adcdb7131 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Fri, 13 Dec 2024 21:32:20 +0000 Subject: [PATCH 1140/1397] update links to openmetrics to reference the v1.0.0 release Signed-off-by: David Ashpole --- docs/feature_flags.md | 2 +- model/exemplar/exemplar.go | 2 +- model/textparse/openmetricsparse.go | 2 +- scrape/manager_test.go | 2 +- storage/remote/otlptranslator/prometheus/normalize_name.go | 2 +- storage/remote/otlptranslator/prometheusremotewrite/helper.go | 2 +- 
tsdb/docs/format/wal.md | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 8c0e319f9c..67f31c260f 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -15,7 +15,7 @@ They may be enabled by default in future versions. `--enable-feature=exemplar-storage` -[OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces. +[OpenMetrics](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces. Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The config file block [storage](configuration/configuration.md#configuration-file)/[exemplars](configuration/configuration.md#exemplars) can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `trace_id=` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go index 2c28b17257..d03940f1b2 100644 --- a/model/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -18,7 +18,7 @@ import "github.com/prometheus/prometheus/model/labels" // ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of // the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 // UTF-8 characters." 
-// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars const ExemplarMaxLabelSetLength = 128 // Exemplar is additional information associated with a time series. diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index 16e805f3a9..f0dd51afee 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -337,7 +337,7 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 { } // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. - // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#timestamps ct := int64(p.val * 1000.0) p.setCTParseValues(ct, currHash, currName, true) return &ct diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 6887ca1c43..0f1b9fe692 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -836,7 +836,7 @@ scrape_configs: require.Len(t, createdSeriesSamples, 1) // Conversion taken from common/expfmt.writeOpenMetricsFloat. 
// We don't check the ct timestamp as explicit ts was not implemented in expfmt.Encoder, - // but exists in OM https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created + // but exists in OM https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created // We can implement this, but we want to potentially get rid of OM 1.0 CT lines require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f) } else { diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index b5e07b02bd..6967ca013c 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -30,7 +30,7 @@ import ( // OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html // (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) // Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units -// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units +// OpenMetrics specification for units: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#units-and-base-units var unitMap = map[string]string{ // Time "d": "days", diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 62814bfc8d..2b2d32f2f7 100644 --- 
a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -50,7 +50,7 @@ const ( createdSuffix = "_created" // maxExemplarRunes is the maximum number of UTF-8 exemplar characters // according to the prometheus specification - // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars maxExemplarRunes = 128 // Trace and Span id keys are defined as part of the spec: // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2 diff --git a/tsdb/docs/format/wal.md b/tsdb/docs/format/wal.md index 5d93eb6d87..ce1934db25 100644 --- a/tsdb/docs/format/wal.md +++ b/tsdb/docs/format/wal.md @@ -126,7 +126,7 @@ The first row stores the starting id and the starting timestamp. Series reference and timestamp are encoded as deltas w.r.t the first exemplar. The first exemplar record begins at the second row. 
-See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +See: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars ``` ┌──────────────────────────────────────────────────────────────────┐ From e2f037e554a8a336ca4487324dd4ceee893fda0a Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Fri, 13 Dec 2024 16:42:46 -0500 Subject: [PATCH 1141/1397] rules: Add new `RuleEvaluationTimeSum` field to groups (#15672) * feat(ruler): Add new `RuleEvaluationTimeSum` field to groups Coupled with a metric: `rule_group_last_rule_duration_sum_seconds` This will give us more observability into how fast a group runs with or without concurrency Signed-off-by: Julien Duchesne * Update rules/group.go Co-authored-by: gotjosh Signed-off-by: Julien Duchesne Signed-off-by: Julien Duchesne * Apply suggestions from code review Co-authored-by: gotjosh Signed-off-by: Julien Duchesne Signed-off-by: Julien Duchesne * Remove `in seconds`. A duration is a duration Signed-off-by: Julien Duchesne --------- Signed-off-by: Julien Duchesne Signed-off-by: Julien Duchesne Co-authored-by: gotjosh --- rules/group.go | 60 +++++++++++++++++++++++++++++++++---------- rules/manager.go | 1 + rules/manager_test.go | 12 ++++++--- 3 files changed, 56 insertions(+), 17 deletions(-) diff --git a/rules/group.go b/rules/group.go index 1f7953188b..1ce15be9f2 100644 --- a/rules/group.go +++ b/rules/group.go @@ -44,19 +44,20 @@ import ( // Group is a set of rules that have a logical relation. type Group struct { - name string - file string - interval time.Duration - queryOffset *time.Duration - limit int - rules []Rule - seriesInPreviousEval []map[string]labels.Labels // One per Rule. - staleSeries []labels.Labels - opts *ManagerOptions - mtx sync.Mutex - evaluationTime time.Duration - lastEvaluation time.Time // Wall-clock time of most recent evaluation. - lastEvalTimestamp time.Time // Time slot used for most recent evaluation. 
+ name string + file string + interval time.Duration + queryOffset *time.Duration + limit int + rules []Rule + seriesInPreviousEval []map[string]labels.Labels // One per Rule. + staleSeries []labels.Labels + opts *ManagerOptions + mtx sync.Mutex + evaluationTime time.Duration // Time it took to evaluate the group. + evaluationRuleTimeSum time.Duration // Sum of time it took to evaluate each rule in the group. + lastEvaluation time.Time // Wall-clock time of most recent evaluation. + lastEvalTimestamp time.Time // Time slot used for most recent evaluation. shouldRestore bool @@ -115,6 +116,7 @@ func NewGroup(o GroupOptions) *Group { metrics.EvalFailures.WithLabelValues(key) metrics.GroupLastEvalTime.WithLabelValues(key) metrics.GroupLastDuration.WithLabelValues(key) + metrics.GroupLastRuleDurationSum.WithLabelValues(key) metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules))) metrics.GroupSamples.WithLabelValues(key) metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds()) @@ -370,6 +372,28 @@ func (g *Group) setEvaluationTime(dur time.Duration) { g.evaluationTime = dur } +// GetRuleEvaluationTimeSum returns the sum of the time it took to evaluate each rule in the group irrespective of concurrency. +func (g *Group) GetRuleEvaluationTimeSum() time.Duration { + g.mtx.Lock() + defer g.mtx.Unlock() + return g.evaluationRuleTimeSum +} + +// updateRuleEvaluationTimeSum updates evaluationRuleTimeSum which is the sum of the time it took to evaluate each rule in the group irrespective of concurrency. +// It collects the times from the rules themselves. 
+func (g *Group) updateRuleEvaluationTimeSum() { + var sum time.Duration + for _, rule := range g.rules { + sum += rule.GetEvaluationDuration() + } + + g.metrics.GroupLastRuleDurationSum.WithLabelValues(GroupKey(g.file, g.name)).Set(sum.Seconds()) + + g.mtx.Lock() + defer g.mtx.Unlock() + g.evaluationRuleTimeSum = sum +} + // GetLastEvaluation returns the time the last evaluation of the rule group took place. func (g *Group) GetLastEvaluation() time.Time { g.mtx.Lock() @@ -874,6 +898,7 @@ type Metrics struct { GroupInterval *prometheus.GaugeVec GroupLastEvalTime *prometheus.GaugeVec GroupLastDuration *prometheus.GaugeVec + GroupLastRuleDurationSum *prometheus.GaugeVec GroupLastRestoreDuration *prometheus.GaugeVec GroupRules *prometheus.GaugeVec GroupSamples *prometheus.GaugeVec @@ -952,6 +977,14 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), + GroupLastRuleDurationSum: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "rule_group_last_rule_duration_sum_seconds", + Help: "The sum of time in seconds it took to evaluate each rule in the group regardless of concurrency. This should be higher than the group duration if rules are evaluated concurrently.", + }, + []string{"rule_group"}, + ), GroupLastRestoreDuration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, @@ -989,6 +1022,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { m.GroupInterval, m.GroupLastEvalTime, m.GroupLastDuration, + m.GroupLastRuleDurationSum, m.GroupLastRestoreDuration, m.GroupRules, m.GroupSamples, diff --git a/rules/manager.go b/rules/manager.go index df3d48700a..edc67a832b 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -82,6 +82,7 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time. 
timeSinceStart := time.Since(start) g.metrics.IterationDuration.Observe(timeSinceStart.Seconds()) + g.updateRuleEvaluationTimeSum() g.setEvaluationTime(timeSinceStart) g.setLastEvaluation(start) g.setLastEvalTimestamp(evalTimestamp) diff --git a/rules/manager_test.go b/rules/manager_test.go index 327addcbc1..94ee1e8b8b 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1985,7 +1985,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.Len(t, group.rules, ruleCount) start := time.Now() - group.Eval(ctx, start) + DefaultEvalIterationFunc(ctx, group, start) // Never expect more than 1 inflight query at a time. require.EqualValues(t, 1, maxInflight.Load()) @@ -1993,6 +1993,8 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds()) // Each rule produces one vector. require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) + // Group duration is higher than the sum of rule durations (group overhead). + require.GreaterOrEqual(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum()) } }) @@ -2023,7 +2025,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.Len(t, group.rules, ruleCount) start := time.Now() - group.Eval(ctx, start) + DefaultEvalIterationFunc(ctx, group, start) // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals. require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load()) @@ -2061,7 +2063,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.Len(t, group.rules, ruleCount) start := time.Now() - group.Eval(ctx, start) + DefaultEvalIterationFunc(ctx, group, start) // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals. 
require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load()) @@ -2100,7 +2102,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { start := time.Now() - group.Eval(ctx, start) + DefaultEvalIterationFunc(ctx, group, start) // Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once. require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals) @@ -2108,6 +2110,8 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds()) // Each rule produces one vector. require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) + // Group duration is less than the sum of rule durations + require.Less(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum()) } }) From 84e0f43a0c486458199259499857e0868aa1ac38 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 13 Dec 2024 12:50:05 +0100 Subject: [PATCH 1142/1397] Paginate rule groups, add infinite scroll to rules within groups This addresses extreme slowness when you have thousands of rules in potentially hundreds of rule groups. It can still be a bit slow even with pagination and infinite scroll for very large use cases, but it's much better already than before. 
Fixes https://github.com/prometheus/prometheus/issues/15551 Signed-off-by: Julius Volz --- .../src/components/SettingsMenu.tsx | 204 +++++++---- web/ui/mantine-ui/src/pages/AlertsPage.tsx | 304 +++++++++------- web/ui/mantine-ui/src/pages/RulesPage.tsx | 341 ++++++++++-------- .../src/state/localStorageMiddleware.ts | 1 + web/ui/mantine-ui/src/state/settingsSlice.ts | 12 + 5 files changed, 514 insertions(+), 348 deletions(-) diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index ada252d57c..aae38909d0 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -1,4 +1,12 @@ -import { Popover, ActionIcon, Fieldset, Checkbox, Stack } from "@mantine/core"; +import { + Popover, + ActionIcon, + Fieldset, + Checkbox, + Stack, + Group, + NumberInput, +} from "@mantine/core"; import { IconSettings } from "@tabler/icons-react"; import { FC } from "react"; import { useAppDispatch } from "../state/hooks"; @@ -13,6 +21,8 @@ const SettingsMenu: FC = () => { enableSyntaxHighlighting, enableLinter, showAnnotations, + ruleGroupsPerPage, + alertGroupsPerPage, } = useSettings(); const dispatch = useAppDispatch(); @@ -29,82 +39,126 @@ const SettingsMenu: FC = () => { - -
    - - dispatch( - updateSettings({ useLocalTime: event.currentTarget.checked }) - ) - } - /> -
    + + +
    + + dispatch( + updateSettings({ + useLocalTime: event.currentTarget.checked, + }) + ) + } + /> +
    -
    - - - dispatch( - updateSettings({ - enableQueryHistory: event.currentTarget.checked, - }) - ) - } - /> - - dispatch( - updateSettings({ - enableAutocomplete: event.currentTarget.checked, - }) - ) - } - /> - - dispatch( - updateSettings({ - enableSyntaxHighlighting: event.currentTarget.checked, - }) - ) - } - /> - - dispatch( - updateSettings({ - enableLinter: event.currentTarget.checked, - }) - ) - } - /> - -
    +
    + + + dispatch( + updateSettings({ + enableQueryHistory: event.currentTarget.checked, + }) + ) + } + /> + + dispatch( + updateSettings({ + enableAutocomplete: event.currentTarget.checked, + }) + ) + } + /> + + dispatch( + updateSettings({ + enableSyntaxHighlighting: event.currentTarget.checked, + }) + ) + } + /> + + dispatch( + updateSettings({ + enableLinter: event.currentTarget.checked, + }) + ) + } + /> + +
    +
    -
    - - dispatch( - updateSettings({ - showAnnotations: event.currentTarget.checked, - }) - ) - } - /> -
    -
    + +
    + + dispatch( + updateSettings({ + showAnnotations: event.currentTarget.checked, + }) + ) + } + /> +
    +
    + { + if (typeof value !== "number") { + return; + } + + dispatch( + updateSettings({ + alertGroupsPerPage: value, + }) + ); + }} + /> +
    +
    + { + if (typeof value !== "number") { + return; + } + + dispatch( + updateSettings({ + ruleGroupsPerPage: value, + }) + ); + }} + /> +
    +
    +
    ); diff --git a/web/ui/mantine-ui/src/pages/AlertsPage.tsx b/web/ui/mantine-ui/src/pages/AlertsPage.tsx index 1513f73d58..3143f0b41d 100644 --- a/web/ui/mantine-ui/src/pages/AlertsPage.tsx +++ b/web/ui/mantine-ui/src/pages/AlertsPage.tsx @@ -11,6 +11,7 @@ import { Alert, TextInput, Anchor, + Pagination, } from "@mantine/core"; import { useSuspenseAPIQuery } from "../api/api"; import { AlertingRule, AlertingRulesResult } from "../api/responseTypes/rules"; @@ -18,7 +19,7 @@ import badgeClasses from "../Badge.module.css"; import panelClasses from "../Panel.module.css"; import RuleDefinition from "../components/RuleDefinition"; import { humanizeDurationRelative, now } from "../lib/formatTime"; -import { Fragment, useMemo } from "react"; +import { Fragment, useEffect, useMemo } from "react"; import { StateMultiSelect } from "../components/StateMultiSelect"; import { IconInfoCircle, IconSearch } from "@tabler/icons-react"; import { LabelBadges } from "../components/LabelBadges"; @@ -26,6 +27,7 @@ import { useSettings } from "../state/settingsSlice"; import { ArrayParam, BooleanParam, + NumberParam, StringParam, useQueryParam, withDefault, @@ -33,6 +35,7 @@ import { import { useDebouncedValue } from "@mantine/hooks"; import { KVSearch } from "@nexucis/kvsearch"; import { inputIconStyle } from "../styles"; +import CustomInfiniteScroll from "../components/CustomInfiniteScroll"; type AlertsPageData = { // How many rules are in each state across all groups. @@ -132,6 +135,12 @@ const buildAlertsPageData = ( return pageData; }; +// Should be defined as a constant here instead of inline as a value +// to avoid unnecessary re-renders. Otherwise the empty array has +// a different reference on each render and causes subsequent memoized +// computations to re-run as long as no state filter is selected. +const emptyStateFilter: string[] = []; + export default function AlertsPage() { // Fetch the alerting rules data. 
const { data } = useSuspenseAPIQuery({ @@ -146,7 +155,7 @@ export default function AlertsPage() { // Define URL query params. const [stateFilter, setStateFilter] = useQueryParam( "state", - withDefault(ArrayParam, []) + withDefault(ArrayParam, emptyStateFilter) ); const [searchFilter, setSearchFilter] = useQueryParam( "search", @@ -158,132 +167,117 @@ export default function AlertsPage() { withDefault(BooleanParam, true) ); + const { alertGroupsPerPage } = useSettings(); + const [activePage, setActivePage] = useQueryParam( + "page", + withDefault(NumberParam, 1) + ); + // Update the page data whenever the fetched data or filters change. const alertsPageData: AlertsPageData = useMemo( () => buildAlertsPageData(data.data, debouncedSearch, stateFilter), [data, stateFilter, debouncedSearch] ); - const shownGroups = showEmptyGroups - ? alertsPageData.groups - : alertsPageData.groups.filter((g) => g.rules.length > 0); + const shownGroups = useMemo( + () => + showEmptyGroups + ? alertsPageData.groups + : alertsPageData.groups.filter((g) => g.rules.length > 0), + [alertsPageData.groups, showEmptyGroups] + ); - return ( - - - - o === "inactive" - ? badgeClasses.healthOk - : o === "pending" - ? badgeClasses.healthWarn - : badgeClasses.healthErr - } - optionCount={(o) => - alertsPageData.globalCounts[ - o as keyof typeof alertsPageData.globalCounts - ] - } - placeholder="Filter by rule state" - values={(stateFilter?.filter((v) => v !== null) as string[]) || []} - onChange={(values) => setStateFilter(values)} - /> - } - placeholder="Filter by rule name or labels" - value={searchFilter || ""} - onChange={(event) => - setSearchFilter(event.currentTarget.value || null) - } - > - - {alertsPageData.groups.length === 0 ? ( - }> - No rules found. - - ) : ( - !showEmptyGroups && - alertsPageData.groups.length !== shownGroups.length && ( - } - > - Hiding {alertsPageData.groups.length - shownGroups.length} empty - groups due to filters or no rules. 
- setShowEmptyGroups(true)}> - Show empty groups - - - ) - )} - - {shownGroups.map((g, i) => { - return ( - - - - - {g.name} - - - {g.file} - - - - {g.counts.firing > 0 && ( - - firing ({g.counts.firing}) - - )} - {g.counts.pending > 0 && ( - - pending ({g.counts.pending}) - - )} - {g.counts.inactive > 0 && ( - - inactive ({g.counts.inactive}) - - )} - - - {g.counts.total === 0 ? ( - }> - No rules in this group. - setShowEmptyGroups(false)} - > - Hide empty groups - - - ) : g.rules.length === 0 ? ( - }> - No rules in this group match your filter criteria (omitted{" "} - {g.counts.total} filtered rules). - setShowEmptyGroups(false)} - > - Hide empty groups - - - ) : ( + // If we were e.g. on page 10 and the number of total pages decreases to 5 (due to filtering + // or changing the max number of items per page), go to the largest possible page. + const totalPageCount = Math.ceil(shownGroups.length / alertGroupsPerPage); + const effectiveActivePage = Math.max(1, Math.min(activePage, totalPageCount)); + + useEffect(() => { + if (effectiveActivePage !== activePage) { + setActivePage(effectiveActivePage); + } + }, [effectiveActivePage, activePage, setActivePage]); + + const currentPageGroups = useMemo( + () => + shownGroups.slice( + (effectiveActivePage - 1) * alertGroupsPerPage, + effectiveActivePage * alertGroupsPerPage + ), + [shownGroups, effectiveActivePage, alertGroupsPerPage] + ); + + // We memoize the actual rendering of the page items to avoid re-rendering + // them on every state change. This is especially important when the user + // types into the search box, as the search filter changes on every keystroke, + // even before debouncing takes place (extracting the filters and results list + // into separate components would be an alternative to this, but it's kinda + // convenient to have in the same file IMO). 
+ const renderedPageItems = useMemo( + () => + currentPageGroups.map((g, i) => ( + + + + + {g.name} + + + {g.file} + + + + {g.counts.firing > 0 && ( + + firing ({g.counts.firing}) + + )} + {g.counts.pending > 0 && ( + + pending ({g.counts.pending}) + + )} + {g.counts.inactive > 0 && ( + + inactive ({g.counts.inactive}) + + )} + + + {g.counts.total === 0 ? ( + }> + No rules in this group. + setShowEmptyGroups(false)} + > + Hide empty groups + + + ) : g.rules.length === 0 ? ( + }> + No rules in this group match your filter criteria (omitted{" "} + {g.counts.total} filtered rules). + setShowEmptyGroups(false)} + > + Hide empty groups + + + ) : ( + ( - {g.rules.map((r, j) => { + {items.map((r, j) => { return ( 0 && (
    - + Alert labels State Active Since @@ -405,9 +399,71 @@ export default function AlertsPage() { })} )} - - ); - })} + /> + )} + + )), + [currentPageGroups, showAnnotations, setShowEmptyGroups] + ); + + return ( + + + + o === "inactive" + ? badgeClasses.healthOk + : o === "pending" + ? badgeClasses.healthWarn + : badgeClasses.healthErr + } + optionCount={(o) => + alertsPageData.globalCounts[ + o as keyof typeof alertsPageData.globalCounts + ] + } + placeholder="Filter by rule state" + values={(stateFilter?.filter((v) => v !== null) as string[]) || []} + onChange={(values) => setStateFilter(values)} + /> + } + placeholder="Filter by rule name or labels" + value={searchFilter || ""} + onChange={(event) => + setSearchFilter(event.currentTarget.value || null) + } + > + + {alertsPageData.groups.length === 0 ? ( + }> + No rules found. + + ) : ( + !showEmptyGroups && + alertsPageData.groups.length !== shownGroups.length && ( + } + > + Hiding {alertsPageData.groups.length - shownGroups.length} empty + groups due to filters or no rules. 
+ setShowEmptyGroups(true)}> + Show empty groups + + + ) + )} + + + {renderedPageItems} ); diff --git a/web/ui/mantine-ui/src/pages/RulesPage.tsx b/web/ui/mantine-ui/src/pages/RulesPage.tsx index a4ed44e7c2..0888fe4737 100644 --- a/web/ui/mantine-ui/src/pages/RulesPage.tsx +++ b/web/ui/mantine-ui/src/pages/RulesPage.tsx @@ -4,6 +4,7 @@ import { Badge, Card, Group, + Pagination, rem, Stack, Text, @@ -29,6 +30,10 @@ import { RulesResult } from "../api/responseTypes/rules"; import badgeClasses from "../Badge.module.css"; import RuleDefinition from "../components/RuleDefinition"; import { badgeIconStyle } from "../styles"; +import { NumberParam, useQueryParam, withDefault } from "use-query-params"; +import { useSettings } from "../state/settingsSlice"; +import { useEffect } from "react"; +import CustomInfiniteScroll from "../components/CustomInfiniteScroll"; const healthBadgeClass = (state: string) => { switch (state) { @@ -45,6 +50,23 @@ const healthBadgeClass = (state: string) => { export default function RulesPage() { const { data } = useSuspenseAPIQuery({ path: `/rules` }); + const { ruleGroupsPerPage } = useSettings(); + + const [activePage, setActivePage] = useQueryParam( + "page", + withDefault(NumberParam, 1) + ); + + // If we were e.g. on page 10 and the number of total pages decreases to 5 (due + // changing the max number of items per page), go to the largest possible page. + const totalPageCount = Math.ceil(data.data.groups.length / ruleGroupsPerPage); + const effectiveActivePage = Math.max(1, Math.min(activePage, totalPageCount)); + + useEffect(() => { + if (effectiveActivePage !== activePage) { + setActivePage(effectiveActivePage); + } + }, [effectiveActivePage, activePage, setActivePage]); return ( @@ -53,157 +75,178 @@ export default function RulesPage() { No rule groups configured. 
)} - {data.data.groups.map((g, i) => ( - - - - - {g.name} - - - {g.file} - + + {data.data.groups + .slice( + (effectiveActivePage - 1) * ruleGroupsPerPage, + effectiveActivePage * ruleGroupsPerPage + ) + .map((g, i) => ( + + + + + {g.name} + + + {g.file} + + + + + } + > + last run {humanizeDurationRelative(g.lastEvaluation, now())} + + + + } + > + took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)} + + + + } + > + every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "} + + + - - - } - > - last run {humanizeDurationRelative(g.lastEvaluation, now())} - - - - } - > - took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)} - - - - } - > - every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "} - - - - - {g.rules.length === 0 && ( - }> - No rules in rule group. - - )} - - {g.rules.map((r, j) => ( - - - - - {r.type === "alerting" ? ( - - - - ) : ( - - - - )} - {r.name} - - - - - } - > - {humanizeDurationRelative(r.lastEvaluation, now())} - - - - - - } - > - {humanizeDuration( - parseFloat(r.evaluationTime) * 1000 - )} - - - - - {r.health} - - - - - - - {r.lastError && ( - } + {g.rules.length === 0 && ( + }> + No rules in rule group. + + )} + ( + + {items.map((r, j) => ( + - Error: {r.lastError} - - )} - - - ))} - - - ))} + + + + {r.type === "alerting" ? 
( + + + + ) : ( + + + + )} + {r.name} + + + + + + } + > + {humanizeDurationRelative( + r.lastEvaluation, + now() + )} + + + + + + } + > + {humanizeDuration( + parseFloat(r.evaluationTime) * 1000 + )} + + + + + {r.health} + + + + + + + {r.lastError && ( + } + > + Error: {r.lastError} + + )} + + + ))} + + )} + /> + + ))} ); } diff --git a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts index 93eae950ea..79baa5ac64 100644 --- a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts +++ b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts @@ -63,6 +63,7 @@ startAppListening({ case "enableSyntaxHighlighting": case "enableLinter": case "showAnnotations": + case "ruleGroupsPerPage": return persistToLocalStorage(`settings.${key}`, value); } }); diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts index ea744e0149..c4154b7253 100644 --- a/web/ui/mantine-ui/src/state/settingsSlice.ts +++ b/web/ui/mantine-ui/src/state/settingsSlice.ts @@ -14,6 +14,8 @@ interface Settings { enableSyntaxHighlighting: boolean; enableLinter: boolean; showAnnotations: boolean; + ruleGroupsPerPage: number; + alertGroupsPerPage: number; } // Declared/defined in public/index.html, value replaced by Prometheus when serving bundle. @@ -29,6 +31,8 @@ export const localStorageKeyEnableSyntaxHighlighting = "settings.enableSyntaxHighlighting"; export const localStorageKeyEnableLinter = "settings.enableLinter"; export const localStorageKeyShowAnnotations = "settings.showAnnotations"; +export const localStorageKeyRuleGroupsPerPage = "settings.ruleGroupsPerPage"; +export const localStorageKeyAlertGroupsPerPage = "settings.alertGroupsPerPage"; // This dynamically/generically determines the pathPrefix by stripping the first known // endpoint suffix from the window location path. 
It works out of the box for both direct @@ -95,6 +99,14 @@ export const initialState: Settings = { localStorageKeyShowAnnotations, true ), + ruleGroupsPerPage: initializeFromLocalStorage( + localStorageKeyRuleGroupsPerPage, + 10 + ), + alertGroupsPerPage: initializeFromLocalStorage( + localStorageKeyAlertGroupsPerPage, + 10 + ), }; export const settingsSlice = createSlice({ From ac4f8a5e23e6c03a12e694313cb7eac4f58e4296 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 16 Dec 2024 09:42:52 +0000 Subject: [PATCH 1143/1397] [ENHANCEMENT] TSDB: Improve calculation of space used by labels (#13880) * [ENHANCEMENT] TSDB: Improve calculation of space used by labels The labels for each series in the Head take up some some space in the Postings index, but far more space in the `memSeries` structure. Instead of having the Postings index calculate this overhead, which is a layering violation, have the caller pass in a function to do it. Provide three implementations of this function for the three Labels versions. Signed-off-by: Bryan Boreham --- model/labels/labels.go | 6 ++++++ model/labels/labels_dedupelabels.go | 5 +++++ model/labels/labels_stringlabels.go | 5 +++++ tsdb/head.go | 2 +- tsdb/index/postings.go | 5 +++-- tsdb/index/postings_test.go | 7 ++++--- 6 files changed, 24 insertions(+), 6 deletions(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index f4de7496ce..0747ab90d9 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -19,6 +19,7 @@ import ( "bytes" "slices" "strings" + "unsafe" "github.com/cespare/xxhash/v2" ) @@ -488,3 +489,8 @@ func (b *ScratchBuilder) Labels() Labels { func (b *ScratchBuilder) Overwrite(ls *Labels) { *ls = append((*ls)[:0], b.add...) } + +// SizeOfLabels returns the approximate space required for n copies of a label. 
+func SizeOfLabels(name, value string, n uint64) uint64 { + return (uint64(len(name)) + uint64(unsafe.Sizeof(name)) + uint64(len(value)) + uint64(unsafe.Sizeof(value))) * n +} diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index da8a88cc15..a0d83e0044 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -815,3 +815,8 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) { ls.syms = b.syms.nameTable ls.data = yoloString(b.overwriteBuffer) } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return uint64(len(name)+len(value)) + n*4 // Assuming most symbol-table entries are 2 bytes long. +} diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index c64bb990e0..f49ed96f65 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -691,3 +691,8 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { // no-op } + +// SizeOfLabels returns the approximate space required for n copies of a label. 
+func SizeOfLabels(name, value string, n uint64) uint64 { + return uint64(labelSize(&Label{Name: name, Value: value})) * n +} diff --git a/tsdb/head.go b/tsdb/head.go index c67c438e52..47f85d7713 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1048,7 +1048,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *ind return h.cardinalityCache } h.cardinalityCacheKey = cacheKey - h.cardinalityCache = h.postings.Stats(statsByLabelName, limit) + h.cardinalityCache = h.postings.Stats(statsByLabelName, limit, labels.SizeOfLabels) h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second return h.cardinalityCache diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index ea32ba5632..f9a284bc70 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -190,7 +190,8 @@ type PostingsStats struct { } // Stats calculates the cardinality statistics from postings. -func (p *MemPostings) Stats(label string, limit int) *PostingsStats { +// Caller can pass in a function which computes the space required for n series with a given label. 
+func (p *MemPostings) Stats(label string, limit int, labelSizeFunc func(string, string, uint64) uint64) *PostingsStats { var size uint64 p.mtx.RLock() @@ -218,7 +219,7 @@ func (p *MemPostings) Stats(label string, limit int) *PostingsStats { } seriesCnt := uint64(len(values)) labelValuePairs.push(Stat{Name: n + "=" + name, Count: seriesCnt}) - size += uint64(len(name)) * seriesCnt + size += labelSizeFunc(n, name, seriesCnt) } labelValueLength.push(Stat{Name: n, Count: size}) } diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 6ff5b9c060..6dd9f25bc0 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -939,7 +939,7 @@ func BenchmarkPostings_Stats(b *testing.B) { } b.ResetTimer() for n := 0; n < b.N; n++ { - p.Stats("__name__", 10) + p.Stats("__name__", 10, labels.SizeOfLabels) } } @@ -954,7 +954,8 @@ func TestMemPostingsStats(t *testing.T) { p.Add(2, labels.FromStrings("label", "value1")) // call the Stats method to calculate the cardinality statistics - stats := p.Stats("label", 10) + // passing a fake calculation so we get the same result regardless of compilation -tags. 
+ stats := p.Stats("label", 10, func(name, value string, n uint64) uint64 { return uint64(len(name)+len(value)) * n }) // assert that the expected statistics were calculated require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count) @@ -963,7 +964,7 @@ func TestMemPostingsStats(t *testing.T) { require.Equal(t, uint64(3), stats.CardinalityLabelStats[0].Count) require.Equal(t, "label", stats.CardinalityLabelStats[0].Name) - require.Equal(t, uint64(24), stats.LabelValueStats[0].Count) + require.Equal(t, uint64(44), stats.LabelValueStats[0].Count) require.Equal(t, "label", stats.LabelValueStats[0].Name) require.Equal(t, uint64(2), stats.LabelValuePairsStats[0].Count) From bad1c75514589c002b71bdb17abf3fbfb922f6a7 Mon Sep 17 00:00:00 2001 From: Joel Beckmeyer Date: Fri, 13 Dec 2024 20:54:34 -0500 Subject: [PATCH 1144/1397] fix alignment of atomic uint64 on 32-bit Signed-off-by: Joel Beckmeyer --- util/testutil/context.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/testutil/context.go b/util/testutil/context.go index 0c9e0f6f64..ea4b0e3746 100644 --- a/util/testutil/context.go +++ b/util/testutil/context.go @@ -49,8 +49,8 @@ func (c *MockContext) Value(interface{}) interface{} { // MockContextErrAfter is a MockContext that will return an error after a certain // number of calls to Err(). 
type MockContextErrAfter struct { + count atomic.Uint64 MockContext - count atomic.Uint64 FailAfter uint64 } From bdace977445a004862b0b82c5d15992703fd16ab Mon Sep 17 00:00:00 2001 From: Joel Beckmeyer Date: Fri, 6 Dec 2024 15:57:35 -0500 Subject: [PATCH 1145/1397] fix TestCuttingNewHeadChunks/really_large_histograms on 32-bit Signed-off-by: Joel Beckmeyer --- model/histogram/test_utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model/histogram/test_utils.go b/model/histogram/test_utils.go index 9e9a711c29..e6b33863bd 100644 --- a/model/histogram/test_utils.go +++ b/model/histogram/test_utils.go @@ -19,12 +19,12 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { bucketsPerSide := numBuckets / 2 spanLength := uint32(bucketsPerSide / numSpans) // Given all bucket deltas are 1, sum bucketsPerSide + 1. - observationCount := bucketsPerSide * (1 + bucketsPerSide) + observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide)) var histograms []*Histogram for i := 0; i < numHistograms; i++ { h := &Histogram{ - Count: uint64(i + observationCount), + Count: uint64(i) + observationCount, ZeroCount: uint64(i), ZeroThreshold: 1e-128, Sum: 18.4 * float64(i+1), From 39f5a07236914ad0cf6c09d934f37ea7deda9886 Mon Sep 17 00:00:00 2001 From: Joel Beckmeyer Date: Sun, 15 Dec 2024 17:53:36 -0500 Subject: [PATCH 1146/1397] fix TestOOOHeadChunkReader_Chunk on 32-bit Signed-off-by: Joel Beckmeyer --- scrape/manager_test.go | 2 +- storage/merge_test.go | 4 ++-- tsdb/db_test.go | 40 +++++++++++++++++++------------------- tsdb/head_test.go | 4 ++-- tsdb/ooo_head_test.go | 8 ++++---- tsdb/querier_test.go | 12 ++++++------ tsdb/testutil.go | 16 +++++++-------- tsdb/tsdbutil/histogram.go | 28 +++++++++++++------------- 8 files changed, 57 insertions(+), 57 deletions(-) diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 0f1b9fe692..b9c6f4c40e 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ 
-894,7 +894,7 @@ func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatS // generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram, // but in the form of dto.Histogram. func generateTestHistogram(i int) *dto.Histogram { - helper := tsdbutil.GenerateTestHistogram(i) + helper := tsdbutil.GenerateTestHistogram(int64(i)) h := &dto.Histogram{} h.SampleCount = proto.Uint64(helper.Count) h.SampleSum = proto.Float64(helper.Sum) diff --git a/storage/merge_test.go b/storage/merge_test.go index 04d4e92078..92d951b839 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -385,13 +385,13 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) { } func histogramSample(ts int64, hint histogram.CounterResetHint) hSample { - h := tsdbutil.GenerateTestHistogram(int(ts + 1)) + h := tsdbutil.GenerateTestHistogram(ts + 1) h.CounterResetHint = hint return hSample{t: ts, h: h} } func floatHistogramSample(ts int64, hint histogram.CounterResetHint) fhSample { - fh := tsdbutil.GenerateTestFloatHistogram(int(ts + 1)) + fh := tsdbutil.GenerateTestFloatHistogram(ts + 1) fh.CounterResetHint = hint return fhSample{t: ts, fh: fh} } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 306dc4579e..6e1f6c54b3 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -4101,7 +4101,7 @@ func TestOOOWALWrite(t *testing.T) { }, "integer histogram": { appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { - seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestHistogram(int(mins)), nil) + seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestHistogram(mins), nil) require.NoError(t, err) return seriesRef, nil }, @@ -4192,7 +4192,7 @@ func TestOOOWALWrite(t *testing.T) { }, "float histogram": { appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { - seriesRef, err := app.AppendHistogram(0, 
l, minutes(mins), nil, tsdbutil.GenerateTestFloatHistogram(int(mins))) + seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestFloatHistogram(mins)) require.NoError(t, err) return seriesRef, nil }, @@ -4736,12 +4736,12 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) { return sample{t: ts, f: float64(ts)} } if valType == chunkenc.ValHistogram { - h := tsdbutil.GenerateTestHistogram(int(ts)) + h := tsdbutil.GenerateTestHistogram(ts) _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) require.NoError(t, err) return sample{t: ts, h: h} } - fh := tsdbutil.GenerateTestFloatHistogram(int(ts)) + fh := tsdbutil.GenerateTestFloatHistogram(ts) _, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) require.NoError(t, err) return sample{t: ts, fh: fh} @@ -5427,37 +5427,37 @@ func TestQuerierOOOQuery(t *testing.T) { }, "integer histogram": { appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { - h := tsdbutil.GenerateTestHistogram(int(ts)) + h := tsdbutil.GenerateTestHistogram(ts) if counterReset { h.CounterResetHint = histogram.CounterReset } return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) }, sampleFunc: func(ts int64) chunks.Sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)} }, }, "float histogram": { appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { - fh := tsdbutil.GenerateTestFloatHistogram(int(ts)) + fh := tsdbutil.GenerateTestFloatHistogram(ts) if counterReset { fh.CounterResetHint = histogram.CounterReset } return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) }, sampleFunc: func(ts int64) chunks.Sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)} }, }, 
"integer histogram counter resets": { // Adding counter reset to all histograms means each histogram will have its own chunk. appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { - h := tsdbutil.GenerateTestHistogram(int(ts)) + h := tsdbutil.GenerateTestHistogram(ts) h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument. return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) }, sampleFunc: func(ts int64) chunks.Sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)} }, }, } @@ -5743,37 +5743,37 @@ func TestChunkQuerierOOOQuery(t *testing.T) { }, "integer histogram": { appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { - h := tsdbutil.GenerateTestHistogram(int(ts)) + h := tsdbutil.GenerateTestHistogram(ts) if counterReset { h.CounterResetHint = histogram.CounterReset } return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) }, sampleFunc: func(ts int64) chunks.Sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)} }, }, "float histogram": { appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { - fh := tsdbutil.GenerateTestFloatHistogram(int(ts)) + fh := tsdbutil.GenerateTestFloatHistogram(ts) if counterReset { fh.CounterResetHint = histogram.CounterReset } return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh) }, sampleFunc: func(ts int64) chunks.Sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)} }, }, "integer histogram counter resets": { // Adding counter reset to all histograms means each histogram will have its own chunk. 
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { - h := tsdbutil.GenerateTestHistogram(int(ts)) + h := tsdbutil.GenerateTestHistogram(ts) h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument. return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) }, sampleFunc: func(ts int64) chunks.Sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)} }, }, "integer histogram with recode": { @@ -6908,7 +6908,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { app := db.Appender(context.Background()) tsMs := ts * time.Minute.Milliseconds() if floatHistogram { - h := tsdbutil.GenerateTestFloatHistogram(val) + h := tsdbutil.GenerateTestFloatHistogram(int64(val)) h.CounterResetHint = hint _, err = app.AppendHistogram(0, l, tsMs, nil, h) require.NoError(t, err) @@ -6916,7 +6916,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) { return sample{t: tsMs, fh: h.Copy()} } - h := tsdbutil.GenerateTestHistogram(val) + h := tsdbutil.GenerateTestHistogram(int64(val)) h.CounterResetHint = hint _, err = app.AppendHistogram(0, l, tsMs, h, nil) require.NoError(t, err) @@ -7267,14 +7267,14 @@ func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing app := db.Appender(context.Background()) tsMs := ts if floatHistogram { - h := tsdbutil.GenerateTestFloatHistogram(val) + h := tsdbutil.GenerateTestFloatHistogram(int64(val)) _, err = app.AppendHistogram(0, l, tsMs, nil, h) require.NoError(t, err) require.NoError(t, app.Commit()) return sample{t: tsMs, fh: h.Copy()} } - h := tsdbutil.GenerateTestHistogram(val) + h := tsdbutil.GenerateTestHistogram(int64(val)) _, err = app.AppendHistogram(0, l, tsMs, h, nil) require.NoError(t, err) require.NoError(t, app.Commit()) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2ca3aeffc7..fb158b593c 100644 
--- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -4732,7 +4732,7 @@ func TestOOOHistogramCounterResetHeaders(t *testing.T) { // OOO histogram for i := 1; i <= 5; i++ { - appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+i)) + appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+int64(i))) } // Nothing mmapped yet. checkOOOExpCounterResetHeader() @@ -4820,7 +4820,7 @@ func TestOOOHistogramCounterResetHeaders(t *testing.T) { appendHistogram(300, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(3000))) for i := 1; i <= 4; i++ { - appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+i)) + appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+int64(i))) } // One mmapped chunk with (ts, val) [(300, 3000), (301, 3001), (302, 3002), (303, 3003), (350, 4000)]. diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index 37a46e76da..203330fcd8 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -54,12 +54,12 @@ func TestOOOInsert(t *testing.T) { }, "integer histogram": { sampleFunc: func(ts int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)} }, }, "float histogram": { sampleFunc: func(ts int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)} }, }, } @@ -118,12 +118,12 @@ func TestOOOInsertDuplicate(t *testing.T) { }, "integer histogram": { sampleFunc: func(ts int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)} }, }, "float histogram": { sampleFunc: func(ts int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))} + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)} }, }, } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 482907757a..73788f061b 
100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -1826,12 +1826,12 @@ func checkCurrVal(t *testing.T, valType chunkenc.ValueType, it *populateWithDelS ts, h := it.AtHistogram(nil) require.Equal(t, int64(expectedTs), ts) h.CounterResetHint = histogram.UnknownCounterReset - require.Equal(t, tsdbutil.GenerateTestHistogram(expectedValue), h) + require.Equal(t, tsdbutil.GenerateTestHistogram(int64(expectedValue)), h) case chunkenc.ValFloatHistogram: ts, h := it.AtFloatHistogram(nil) require.Equal(t, int64(expectedTs), ts) h.CounterResetHint = histogram.UnknownCounterReset - require.Equal(t, tsdbutil.GenerateTestFloatHistogram(expectedValue), h) + require.Equal(t, tsdbutil.GenerateTestFloatHistogram(int64(expectedValue)), h) default: panic("unexpected value type") } @@ -3579,16 +3579,16 @@ func TestQueryWithDeletedHistograms(t *testing.T) { ctx := context.Background() testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){ "intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { - return tsdbutil.GenerateTestHistogram(i), nil + return tsdbutil.GenerateTestHistogram(int64(i)), nil }, "intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { - return tsdbutil.GenerateTestGaugeHistogram(rand.Int() % 1000), nil + return tsdbutil.GenerateTestGaugeHistogram(rand.Int63() % 1000), nil }, "floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { - return nil, tsdbutil.GenerateTestFloatHistogram(i) + return nil, tsdbutil.GenerateTestFloatHistogram(int64(i)) }, "floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { - return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int() % 1000) + return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int63() % 1000) }, } diff --git a/tsdb/testutil.go b/tsdb/testutil.go index c39eb133c7..57516c6271 100644 --- a/tsdb/testutil.go +++ b/tsdb/testutil.go @@ -63,45 +63,45 @@ var sampleTypeScenarios = 
map[string]sampleTypeScenario{ intHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} + s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)} }, }, floatHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} + s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)} }, }, gaugeIntHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} + s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} + return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)} }, }, gaugeFloatHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, fh: 
tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} + s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} + return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)} }, }, } diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index ce934a638d..60c3e5f726 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -21,7 +21,7 @@ import ( func GenerateTestHistograms(n int) (r []*histogram.Histogram) { for i := 0; i < n; i++ { - h := GenerateTestHistogram(i) + h := GenerateTestHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset } @@ -31,13 +31,13 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { } func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram { - h := GenerateTestHistogram(n) + h := GenerateTestHistogram(int64(n)) h.CounterResetHint = hint return h } // GenerateTestHistogram but it is up to the user to set any known counter reset hint. 
-func GenerateTestHistogram(i int) *histogram.Histogram { +func GenerateTestHistogram(i int64) *histogram.Histogram { return &histogram.Histogram{ Count: 12 + uint64(i*9), ZeroCount: 2 + uint64(i), @@ -48,16 +48,16 @@ func GenerateTestHistogram(i int) *histogram.Histogram { {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + PositiveBuckets: []int64{i + 1, 1, -1, 0}, NegativeSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - NegativeBuckets: []int64{int64(i + 1), 1, -1, 0}, + NegativeBuckets: []int64{i + 1, 1, -1, 0}, } } -func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram { +func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram { return &histogram.Histogram{ Count: 5 + uint64(i*4), Sum: 18.4 * float64(i+1), @@ -66,20 +66,20 @@ func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram { {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + PositiveBuckets: []int64{i + 1, 1, -1, 0}, CustomValues: []float64{0, 1, 2, 3, 4}, } } func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { for x := 0; x < n; x++ { - i := int(math.Sin(float64(x))*100) + 100 + i := int64(math.Sin(float64(x))*100) + 100 r = append(r, GenerateTestGaugeHistogram(i)) } return r } -func GenerateTestGaugeHistogram(i int) *histogram.Histogram { +func GenerateTestGaugeHistogram(i int64) *histogram.Histogram { h := GenerateTestHistogram(i) h.CounterResetHint = histogram.GaugeType return h @@ -87,7 +87,7 @@ func GenerateTestGaugeHistogram(i int) *histogram.Histogram { func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { for i := 0; i < n; i++ { - h := GenerateTestFloatHistogram(i) + h := GenerateTestFloatHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset } @@ -97,7 +97,7 @@ func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { } // 
GenerateTestFloatHistogram but it is up to the user to set any known counter reset hint. -func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { +func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 12 + float64(i*9), ZeroCount: 2 + float64(i), @@ -117,7 +117,7 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { } } -func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram { +func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 5 + float64(i*4), Sum: 18.4 * float64(i+1), @@ -133,13 +133,13 @@ func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram { func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { for x := 0; x < n; x++ { - i := int(math.Sin(float64(x))*100) + 100 + i := int64(math.Sin(float64(x))*100) + 100 r = append(r, GenerateTestGaugeFloatHistogram(i)) } return r } -func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { +func GenerateTestGaugeFloatHistogram(i int64) *histogram.FloatHistogram { h := GenerateTestFloatHistogram(i) h.CounterResetHint = histogram.GaugeType return h From c8c128b0f1c264455c2f7bc048c2492a54665bf5 Mon Sep 17 00:00:00 2001 From: Joel Beckmeyer Date: Mon, 16 Dec 2024 08:41:50 -0500 Subject: [PATCH 1147/1397] fix TestDropOldTimeSeries on 32-bit Signed-off-by: Joel Beckmeyer --- storage/remote/queue_manager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 8369da3f12..202c71c348 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -2077,7 +2077,7 @@ func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...la for j := 0; j < numSamples/2; j++ { sample := record.RefSample{ Ref: chunks.HeadSeriesRef(i), - T: 
int64(int(time.Now().UnixMilli()) + j), + T: time.Now().UnixMilli() + int64(j), V: float64(i), } samples = append(samples, sample) From 41dabfb4646cd0d7111aaebd56b4df75ceae9580 Mon Sep 17 00:00:00 2001 From: Joel Beckmeyer Date: Mon, 16 Dec 2024 10:42:56 -0500 Subject: [PATCH 1148/1397] fix topk/bottomk with numbers greater than int maxsize on 32-bit Signed-off-by: Joel Beckmeyer --- promql/engine.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index 13f8b06972..a23e899d04 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1352,7 +1352,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate } groups := make([]groupedAggregation, groupCount) - var k int + var k int64 var ratio float64 var seriess map[uint64]Series switch aggExpr.Op { @@ -1360,9 +1360,9 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate if !convertibleToInt64(param) { ev.errorf("Scalar value %v overflows int64", param) } - k = int(param) - if k > len(inputMatrix) { - k = len(inputMatrix) + k = int64(param) + if k > int64(len(inputMatrix)) { + k = int64(len(inputMatrix)) } if k < 1 { return nil, warnings @@ -3172,7 +3172,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // seriesToResult maps inputMatrix indexes to groups indexes. // For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. // For a range query, aggregates output in the seriess map. 
-func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { op := e.Op var s Sample var annos annotations.Annotations @@ -3243,7 +3243,7 @@ seriesLoop: case s.H != nil: // Ignore histogram sample and add info annotation. annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange)) - case len(group.heap) < k: + case int64(len(group.heap)) < k: heap.Push(&group.heap, &s) case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is bigger than the previous smallest element - overwrite that. @@ -3259,7 +3259,7 @@ seriesLoop: case s.H != nil: // Ignore histogram sample and add info annotation. annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange)) - case len(group.heap) < k: + case int64(len(group.heap)) < k: heap.Push((*vectorByReverseValueHeap)(&group.heap), &s) case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is smaller than the previous biggest element - overwrite that. @@ -3270,13 +3270,13 @@ seriesLoop: } case parser.LIMITK: - if len(group.heap) < k { + if int64(len(group.heap)) < k { heap.Push(&group.heap, &s) } // LIMITK optimization: early break if we've added K elem to _every_ group, // especially useful for large timeseries where the user is exploring labels via e.g. 
// limitk(10, my_metric) - if !group.groupAggrComplete && len(group.heap) == k { + if !group.groupAggrComplete && int64(len(group.heap)) == k { group.groupAggrComplete = true groupsRemaining-- if groupsRemaining == 0 { From 5271dabb2967cce707159d0dc54a8e60327465ad Mon Sep 17 00:00:00 2001 From: Joel Beckmeyer Date: Mon, 16 Dec 2024 10:43:19 -0500 Subject: [PATCH 1149/1397] run all tests for 386 Signed-off-by: Joel Beckmeyer --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f7703d940d..c0d4302072 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,7 +33,7 @@ jobs: - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... - - run: GOARCH=386 go test ./cmd/prometheus + - run: GOARCH=386 go test ./... - uses: ./.github/promci/actions/check_proto with: version: "3.15.8" From 17d5bc4e54295a713052f15fe155fd3fc09ae20c Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Mon, 16 Dec 2024 17:20:51 +0100 Subject: [PATCH 1150/1397] Update comment on MemPostings.lvs There was a missing verb there. Signed-off-by: Oleg Zaytsev --- tsdb/index/postings.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index f9a284bc70..a2c5a82239 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -72,7 +72,7 @@ type MemPostings struct { // lvs holds the label values for each label name. // lvs[name] is essentially an unsorted append-only list of all keys in m[name] // mtx must be held when interacting with lvs. - // Since it's append-only, it is safe to the label values slice after releasing the lock. + // Since it's append-only, it is safe to read the label values slice after releasing the lock. 
lvs map[string][]string ordered bool From beca06c62955befa6676cc8b378c2d62d57e402b Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 17 Dec 2024 08:40:53 +0100 Subject: [PATCH 1151/1397] Clarify the NoUTF8EscapingWithSuffixes and UnderscoreEscapingWithSuffixes modes (#15567) Signed-off-by: Arve Knudsen --- config/config.go | 7 +++++-- docs/configuration/configuration.md | 4 ++-- documentation/examples/prometheus-otlp.yml | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/config/config.go b/config/config.go index 83a4f5860d..73282ac429 100644 --- a/config/config.go +++ b/config/config.go @@ -1427,10 +1427,13 @@ func getGoGCEnv() int { type translationStrategyOption string var ( - // NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added. + // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. + // Unit and type suffixes may be added to metric names, according to certain rules. NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. - // This option will translate all UTF-8 characters to underscores, while adding units and type suffixes. + // This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores, + // and label name characters that are not alphanumerics/underscores to underscores. + // Unit and type suffixes may be appended to metric names, according to certain rules. 
UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" ) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index f76a8bfbb9..57f4013936 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -179,8 +179,8 @@ otlp: # - "UnderscoreEscapingWithSuffixes" refers to commonly agreed normalization used # by OpenTelemetry in https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus # - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus. - # It preserves all special characters like dots, but it still add required suffixes - # for units and _total like in UnderscoreEscapingWithSuffixes. + # It preserves all special characters like dots, but still adds required metric name suffixes + # for units and _total, as UnderscoreEscapingWithSuffixes does. [ translation_strategy: | default = "UnderscoreEscapingWithSuffixes" ] # Enables adding "service.name", "service.namespace" and "service.instance.id" # resource attributes to the "target_info" metric, on top of converting diff --git a/documentation/examples/prometheus-otlp.yml b/documentation/examples/prometheus-otlp.yml index f0a8ab8b11..07b1fa54b1 100644 --- a/documentation/examples/prometheus-otlp.yml +++ b/documentation/examples/prometheus-otlp.yml @@ -22,7 +22,7 @@ otlp: - k8s.pod.name - k8s.replicaset.name - k8s.statefulset.name - # Ingest OTLP data keeping UTF-8 characters in metric/label names. + # Ingest OTLP data keeping all characters in metric/label names. translation_strategy: NoUTF8EscapingWithSuffixes storage: From 6e3eba133d67a14aace172d52e1d4e9a3c9a8c83 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Tue, 17 Dec 2024 10:54:03 +0000 Subject: [PATCH 1152/1397] prombench: Extend GH job for the upcoming custom benchmark feature. 
See https://github.com/prometheus/proposals/pull/41 Signed-off-by: bwplotka --- .github/workflows/prombench.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/prombench.yml b/.github/workflows/prombench.yml index 6ee172662b..dc4e29d4f7 100644 --- a/.github/workflows/prombench.yml +++ b/.github/workflows/prombench.yml @@ -15,6 +15,8 @@ env: PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }} PROVIDER: gke RELEASE: ${{ github.event.client_payload.RELEASE }} + BENCHMARK_VERSION: ${{ github.event.client_payload.BENCHMARK_VERSION }} + BENCHMARK_DIRECTORY: ${{ github.event.client_payload.BENCHMARK_DIRECTORY }} ZONE: europe-west3-a jobs: benchmark_start: From c8359fcd6bedbcd49bc9515d4bd789788ea4eadb Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 17 Dec 2024 17:34:24 +0100 Subject: [PATCH 1153/1397] Fix bug in lbl!~".+" shortcut (#15684) We were appending to the wrong slice, so instead of removing values, we were adding them. Signed-off-by: Oleg Zaytsev --- tsdb/querier.go | 2 +- tsdb/querier_test.go | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tsdb/querier.go b/tsdb/querier.go index 34e9a81231..ce99df6a13 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -271,7 +271,7 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc its = append(its, it) case m.Type == labels.MatchNotRegexp && m.Value == ".+": // .+ regexp matches any non-empty string: get postings for all label values and remove them. - its = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name)) + notIts = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name)) case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. 
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 482907757a..3462ca4093 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3017,6 +3017,15 @@ func TestPostingsForMatchers(t *testing.T) { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")}, exp: []labels.Labels{}, }, + // Test shortcut i!~".+" + { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".+")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + }, + }, } ir, err := h.Index() From 615195372d1ca5ca7251abbaddfe08ec0aa24b5a Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Tue, 17 Dec 2024 16:11:55 -0500 Subject: [PATCH 1154/1397] Ruler: Move inner `eval` function (#15688) Refactoring in prevision of https://github.com/prometheus/prometheus/pull/15681 By moving the `eval` function outside of the for loop, we can modify the rule execution order more easily (without huge git changes) Signed-off-by: Julien Duchesne --- rules/group.go | 247 ++++++++++++++++++++++++------------------------- 1 file changed, 122 insertions(+), 125 deletions(-) diff --git a/rules/group.go b/rules/group.go index 1ce15be9f2..ecc96d0a12 100644 --- a/rules/group.go +++ b/rules/group.go @@ -23,6 +23,9 @@ import ( "sync" "time" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" "go.uber.org/atomic" "github.com/prometheus/prometheus/promql/parser" @@ -30,9 +33,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -511,149 +511,147 @@ 
func (g *Group) CopyState(from *Group) { // Rules can be evaluated concurrently if the `concurrent-rule-eval` feature flag is enabled. func (g *Group) Eval(ctx context.Context, ts time.Time) { var ( - samplesTotal atomic.Float64 - wg sync.WaitGroup + samplesTotal atomic.Float64 + ruleQueryOffset = g.QueryOffset() ) - - ruleQueryOffset := g.QueryOffset() - - for i, rule := range g.rules { - select { - case <-g.done: - return - default: + eval := func(i int, rule Rule, cleanup func()) { + if cleanup != nil { + defer cleanup() } - eval := func(i int, rule Rule, cleanup func()) { - if cleanup != nil { - defer cleanup() + logger := g.logger.With("name", rule.Name(), "index", i) + ctx, sp := otel.Tracer("").Start(ctx, "rule") + sp.SetAttributes(attribute.String("name", rule.Name())) + defer func(t time.Time) { + sp.End() + + since := time.Since(t) + g.metrics.EvalDuration.Observe(since.Seconds()) + rule.SetEvaluationDuration(since) + rule.SetEvaluationTimestamp(t) + }(time.Now()) + + if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { + logger = logger.With("trace_id", sp.SpanContext().TraceID()) + } + + g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + + vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) + if err != nil { + rule.SetHealth(HealthBad) + rule.SetLastError(err) + sp.SetStatus(codes.Error, err.Error()) + g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + + // Canceled queries are intentional termination of queries. This normally + // happens on shutdown and thus we skip logging of any errors here. 
+ var eqc promql.ErrQueryCanceled + if !errors.As(err, &eqc) { + logger.Warn("Evaluating rule failed", "rule", rule, "err", err) } + return + } + rule.SetHealth(HealthGood) + rule.SetLastError(nil) + samplesTotal.Add(float64(len(vector))) - logger := g.logger.With("name", rule.Name(), "index", i) - ctx, sp := otel.Tracer("").Start(ctx, "rule") - sp.SetAttributes(attribute.String("name", rule.Name())) - defer func(t time.Time) { - sp.End() + if ar, ok := rule.(*AlertingRule); ok { + ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc) + } + var ( + numOutOfOrder = 0 + numTooOld = 0 + numDuplicates = 0 + ) - since := time.Since(t) - g.metrics.EvalDuration.Observe(since.Seconds()) - rule.SetEvaluationDuration(since) - rule.SetEvaluationTimestamp(t) - }(time.Now()) - - if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = logger.With("trace_id", sp.SpanContext().TraceID()) - } - - g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - - vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) - if err != nil { + app := g.opts.Appendable.Appender(ctx) + seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i])) + defer func() { + if err := app.Commit(); err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) sp.SetStatus(codes.Error, err.Error()) g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - // Canceled queries are intentional termination of queries. This normally - // happens on shutdown and thus we skip logging of any errors here. 
- var eqc promql.ErrQueryCanceled - if !errors.As(err, &eqc) { - logger.Warn("Evaluating rule failed", "rule", rule, "err", err) - } + logger.Warn("Rule sample appending failed", "err", err) return } - rule.SetHealth(HealthGood) - rule.SetLastError(nil) - samplesTotal.Add(float64(len(vector))) + g.seriesInPreviousEval[i] = seriesReturned + }() - if ar, ok := rule.(*AlertingRule); ok { - ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc) + for _, s := range vector { + if s.H != nil { + _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) + } else { + app.SetOptions(g.appOpts) + _, err = app.Append(0, s.Metric, s.T, s.F) } - var ( - numOutOfOrder = 0 - numTooOld = 0 - numDuplicates = 0 - ) - app := g.opts.Appendable.Appender(ctx) - seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i])) - defer func() { - if err := app.Commit(); err != nil { - rule.SetHealth(HealthBad) - rule.SetLastError(err) - sp.SetStatus(codes.Error, err.Error()) - g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - - logger.Warn("Rule sample appending failed", "err", err) - return + if err != nil { + rule.SetHealth(HealthBad) + rule.SetLastError(err) + sp.SetStatus(codes.Error, err.Error()) + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err } - g.seriesInPreviousEval[i] = seriesReturned - }() - - for _, s := range vector { - if s.H != nil { - _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) - } else { - app.SetOptions(g.appOpts) - _, err = app.Append(0, s.Metric, s.T, s.F) + switch { + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): + numOutOfOrder++ + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) + case errors.Is(unwrappedErr, storage.ErrTooOldSample): + numTooOld++ + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) + case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): + numDuplicates++ + 
logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) + default: + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } + } else { + buf := [1024]byte{} + seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric + } + } + if numOutOfOrder > 0 { + logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) + } + if numTooOld > 0 { + logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) + } + if numDuplicates > 0 { + logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) + } - if err != nil { - rule.SetHealth(HealthBad) - rule.SetLastError(err) - sp.SetStatus(codes.Error, err.Error()) - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - switch { - case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): - numOutOfOrder++ - logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) - case errors.Is(unwrappedErr, storage.ErrTooOldSample): - numTooOld++ - logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) - case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): - numDuplicates++ - logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) - default: - logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) - } - } else { - buf := [1024]byte{} - seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric + for metric, lset := range g.seriesInPreviousEval[i] { + if _, ok := seriesReturned[metric]; !ok { + // Series no longer exposed, mark it stale. 
+ _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN)) + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } + switch { + case unwrappedErr == nil: + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), + errors.Is(unwrappedErr, storage.ErrTooOldSample), + errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): + // Do not count these in logging, as this is expected if series + // is exposed from a different rule. + default: + logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) } } - if numOutOfOrder > 0 { - logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) - } - if numTooOld > 0 { - logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) - } - if numDuplicates > 0 { - logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) - } + } + } - for metric, lset := range g.seriesInPreviousEval[i] { - if _, ok := seriesReturned[metric]; !ok { - // Series no longer exposed, mark it stale. - _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN)) - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - switch { - case unwrappedErr == nil: - case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), - errors.Is(unwrappedErr, storage.ErrTooOldSample), - errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): - // Do not count these in logging, as this is expected if series - // is exposed from a different rule. 
- default: - logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) - } - } - } + var wg sync.WaitGroup + for i, rule := range g.rules { + select { + case <-g.done: + return + default: } if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) { @@ -667,7 +665,6 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { eval(i, rule, nil) } } - wg.Wait() g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load()) From 6d1b933cc16f22b8b4544cf3d14f712951e5d550 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 18 Dec 2024 12:38:34 +0800 Subject: [PATCH 1155/1397] upgrade crypto to fix vulnerability issue Signed-off-by: neozhao98 Signed-off-by: root --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6c15a6690b..0d071e483c 100644 --- a/go.mod +++ b/go.mod @@ -187,7 +187,7 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/crypto v0.30.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.32.0 // indirect diff --git a/go.sum b/go.sum index 0a62fa626c..a931761d96 100644 --- a/go.sum +++ b/go.sum @@ -541,8 +541,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 
h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= From 6641f1121644d47c1be72d9b252f740c42a7497f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 18 Dec 2024 11:47:31 +0000 Subject: [PATCH 1156/1397] Update go dependencies Preparation for release 3.1. Did not update Kubernetes as it forces Go to version 1.23, and we like to support 2 latest versions. Signed-off-by: Bryan Boreham --- documentation/examples/remote_storage/go.mod | 18 +-- documentation/examples/remote_storage/go.sum | 36 ++--- go.mod | 71 ++++----- go.sum | 159 ++++++++----------- web/ui/mantine-ui/src/promql/tools/go.mod | 2 +- 5 files changed, 131 insertions(+), 155 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 298236a8d5..d2b15dbe52 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.22.0 +go 1.22.7 require ( github.com/alecthomas/kingpin/v2 v2.4.0 @@ -8,8 +8,8 @@ require ( github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.8 github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.60.1 - github.com/prometheus/prometheus v0.53.1 + github.com/prometheus/common v0.61.0 + github.com/prometheus/prometheus v1.99.0 github.com/stretchr/testify v1.10.0 ) @@ -55,15 +55,15 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.27.0 
// indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/crypto v0.30.0 // indirect + golang.org/x/net v0.32.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apimachinery v0.29.3 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index bd11b9d04e..860b9a944b 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -344,20 +344,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -373,17 +373,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -411,8 +411,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/go.mod b/go.mod index c40a1d4919..afb6608729 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/prometheus/prometheus -go 1.22.0 +go 1.22.7 -toolchain go1.23.0 +toolchain go1.23.4 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 @@ -12,31 +12,31 @@ require ( github.com/Code-Hex/go-generics-cache v1.5.1 github.com/KimMachineGun/automemlimit v0.6.1 github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b github.com/aws/aws-sdk-go v1.55.5 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.131.0 - github.com/docker/docker v27.4.0+incompatible + github.com/digitalocean/godo v1.132.0 + github.com/docker/docker v27.4.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane v0.13.1 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/fsnotify/fsnotify v1.7.0 + github.com/fsnotify/fsnotify v1.8.0 github.com/go-openapi/strfmt v0.23.0 github.com/go-zookeeper/zk v1.0.4 github.com/gogo/protobuf 
v1.3.2 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.6.0 - github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad github.com/google/uuid v1.6.0 github.com/gophercloud/gophercloud v1.14.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.30.0 - github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 - github.com/hetznercloud/hcloud-go/v2 v2.17.0 + github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec + github.com/hetznercloud/hcloud-go/v2 v2.17.1 github.com/ionos-cloud/sdk-go/v6 v6.3.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 @@ -54,22 +54,22 @@ require ( github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.61.0 github.com/prometheus/common/assets v0.2.0 - github.com/prometheus/exporter-toolkit v0.13.1 + github.com/prometheus/exporter-toolkit v0.13.2 github.com/prometheus/sigv4 v0.1.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/collector/semconv v0.115.0 - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 - go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 - go.opentelemetry.io/otel/sdk v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 + go.opentelemetry.io/collector/pdata v1.22.0 + go.opentelemetry.io/collector/semconv v0.116.0 + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 + 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 + go.opentelemetry.io/otel v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 + go.opentelemetry.io/otel/sdk v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 @@ -79,10 +79,10 @@ require ( golang.org/x/sys v0.28.0 golang.org/x/text v0.21.0 golang.org/x/tools v0.28.0 - google.golang.org/api v0.209.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 - google.golang.org/grpc v1.68.1 - google.golang.org/protobuf v1.35.2 + google.golang.org/api v0.213.0 + google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 + google.golang.org/grpc v1.69.0 + google.golang.org/protobuf v1.36.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.3 @@ -93,9 +93,9 @@ require ( ) require ( - cloud.google.com/go/auth v0.10.2 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -121,17 +121,16 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.22.2 // indirect github.com/go-openapi/errors v0.22.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/loads v0.21.5 // indirect github.com/go-openapi/spec 
v0.20.14 // indirect - github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.23.0 // indirect github.com/go-resty/resty/v2 v2.15.3 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/glog v1.2.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -140,7 +139,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.14.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -151,7 +150,7 @@ require ( github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/imdario/mergo v0.3.16 // indirect + github.com/imdario/mergo v0.3.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect @@ -184,16 +183,16 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect golang.org/x/crypto v0.31.0 // indirect 
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.32.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 4a41df91d1..324ad46f93 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,11 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.10.2 h1:oKF7rgBfSHdp/kuhXtqU/tNDr0mZqhYbEh+6SiqzkKo= -cloud.google.com/go/auth v0.10.2/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= -cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 
h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= @@ -42,8 +42,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -91,14 +91,14 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.131.0 
h1:0WHymufAV5avpodT0h5/pucUVfO4v7biquOIqhLeROY= -github.com/digitalocean/godo v1.131.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ= +github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.4.0+incompatible h1:I9z7sQ5qyzO0BfAb9IMOawRkAGxhYsidKiTMcm0DU+A= -github.com/docker/docker v27.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= +github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -126,8 +126,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify 
v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -146,8 +146,8 @@ github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= @@ -156,8 +156,8 @@ github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/ github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag 
v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8= @@ -177,21 +177,11 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -205,9 +195,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -216,8 +204,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= -github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad 
h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -235,8 +223,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= @@ -283,14 +271,14 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= 
-github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.17.0 h1:ge0w2piey9SV6XGyU/wQ6HBR24QyMbJ3wLzezplqR68= -github.com/hetznercloud/hcloud-go/v2 v2.17.0/go.mod h1:zfyZ4Orx+mPpYDzWAxXR7DHGL50nnlZ5Edzgs1o6f/s= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/hetznercloud/hcloud-go/v2 v2.17.1 h1:DPi019dv0WCiECEmtcuTgc//hBvnxESb6QlJnAb4a04= +github.com/hetznercloud/hcloud-go/v2 v2.17.1/go.mod h1:6ygmBba+FdawR2lLp/d9uJljY2k0dTYthprrI8usdLw= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/ionos-cloud/sdk-go/v6 v6.3.0 h1:/lTieTH9Mo/CWm3cTlFLnK10jgxjUGkAqRffGqvPteY= github.com/ionos-cloud/sdk-go/v6 v6.3.0/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= @@ -442,8 +430,8 @@ github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFS github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= -github.com/prometheus/exporter-toolkit v0.13.1 
h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04= -github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0= +github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= +github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -485,7 +473,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= @@ -501,32 +488,34 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= 
-go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 h1:7F3XCD6WYzDkwbi8I8N+oYJWquPVScnRosKGgqjsR8c= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0/go.mod h1:Dk3C0BfIlZDZ5c6eVS7TYiH2vssuyUU3vUsgbrR+5V4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= 
-go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= +go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -566,7 +555,6 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -644,37 +632,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.209.0 h1:Ja2OXNlyRlWCWu8o+GgI4yUn/wz9h/5ZfFbKz+dQX+w= -google.golang.org/api v0.209.0/go.mod h1:I53S168Yr/PNDNMi5yPnDc0/LGRZO6o7PoEbl/HY3CM= +google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ= +google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api 
v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f h1:C1QccEa9kUwvMgEUORqQD9S17QesQijxjZ84sO82mfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241113202542-65e8d215514f/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= +google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= +google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod index aac6e019aa..40a7876a0a 100644 --- a/web/ui/mantine-ui/src/promql/tools/go.mod +++ b/web/ui/mantine-ui/src/promql/tools/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools -go 1.22.0 +go 1.22.7 require ( github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc From 221f50e4137aec5bc22dcbbed73399d11b85a3b2 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Wed, 18 Dec 
2024 14:11:33 +0000 Subject: [PATCH 1157/1397] ci: We removed funbench functionality from test-infra, removing CI job too. Signed-off-by: bwplotka --- .github/workflows/funcbench.yml | 61 --------------------------------- 1 file changed, 61 deletions(-) delete mode 100644 .github/workflows/funcbench.yml diff --git a/.github/workflows/funcbench.yml b/.github/workflows/funcbench.yml deleted file mode 100644 index 8959f82142..0000000000 --- a/.github/workflows/funcbench.yml +++ /dev/null @@ -1,61 +0,0 @@ -on: - repository_dispatch: - types: [funcbench_start] -name: Funcbench Workflow -permissions: - contents: read - -jobs: - run_funcbench: - name: Running funcbench - if: github.event.action == 'funcbench_start' - runs-on: ubuntu-latest - env: - AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }} - BRANCH: ${{ github.event.client_payload.BRANCH }} - BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }} - PACKAGE_PATH: ${{ github.event.client_payload.PACKAGE_PATH }} - GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }} - GITHUB_ORG: prometheus - GITHUB_REPO: prometheus - GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} - LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }} - GKE_PROJECT_ID: macro-mile-203600 - PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }} - PROVIDER: gke - ZONE: europe-west3-a - steps: - - name: Update status to pending - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Prepare nodepool - uses: docker://prominfra/funcbench:master - with: - entrypoint: "docker_entrypoint" - args: make deploy - - name: Delete all resources - if: always() - uses: docker://prominfra/funcbench:master - with: - entrypoint: "docker_entrypoint" - 
args: make clean - - name: Update status to failure - if: failure() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Update status to success - if: success() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" From 5122ff2f5908b4605b9eb15402e609ab2ac93714 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 19 Dec 2024 15:41:50 +0000 Subject: [PATCH 1158/1397] [DEPS] Pin fsnotify to avoid deadlock bug Bug filed upstream at https://github.com/fsnotify/fsnotify/issues/656 Signed-off-by: Bryan Boreham --- go.mod | 3 +++ go.sum | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index afb6608729..01af5f1c8b 100644 --- a/go.mod +++ b/go.mod @@ -212,3 +212,6 @@ exclude ( github.com/grpc-ecosystem/grpc-gateway v1.14.7 google.golang.org/api v0.30.0 ) + +// Pin until https://github.com/fsnotify/fsnotify/issues/656 is resolved. 
+replace github.com/fsnotify/fsnotify v1.8.0 => github.com/fsnotify/fsnotify v1.7.0 diff --git a/go.sum b/go.sum index 324ad46f93..72abefc4c3 100644 --- a/go.sum +++ b/go.sum @@ -126,8 +126,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= From 0e99ca3e8c53249b357a52ec4c9b1354a26381fe Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Fri, 20 Dec 2024 04:20:28 +0530 Subject: [PATCH 1159/1397] [BUGFIX] PromQL: Fix `deriv`, `predict_linear` and `double_exponential_smoothing` with histograms (#15686) PromQL: Fix deriv, predict_linear and double_exponential_smoothing with histograms Signed-off-by: Neeraj Gartia --------- Signed-off-by: Neeraj Gartia --- promql/functions.go | 33 ++++++++++++++--- promql/promqltest/testdata/functions.test | 44 ++++++++++++++++++++--- util/annotations/annotations.go | 8 +++++ 3 files changed, 75 insertions(+), 10 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index da1821fd18..5f31a3db18 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -355,7 +355,7 @@ func 
calcTrendValue(i int, tf, s0, s1, b float64) float64 { // https://en.wikipedia.org/wiki/Exponential_smoothing . func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] - + metricName := samples.Metric.Get(labels.MetricName) // The smoothing factor argument. sf := vals[1].(Vector)[0].F @@ -374,6 +374,10 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions // Can't do the smoothing operation with less than two points. if l < 2 { + // Annotate mix of float and histogram. + if l == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } @@ -394,7 +398,9 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions s0, s1 = s1, x+y } - + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: s1}), nil } @@ -1110,10 +1116,15 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] + metricName := samples.Metric.Get(labels.MetricName) - // No sense in trying to compute a derivative without at least two points. + // No sense in trying to compute a derivative without at least two float points. // Drop this Vector element. if len(samples.Floats) < 2 { + // Annotate mix of float and histogram. 
+ if len(samples.Floats) == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } @@ -1121,6 +1132,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: slope}), nil } @@ -1128,13 +1142,22 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].F - // No sense in trying to predict anything without at least two points. + metricName := samples.Metric.Get(labels.MetricName) + + // No sense in trying to predict anything without at least two float points. // Drop this Vector element. if len(samples.Floats) < 2 { + // Annotate mix of float and histogram. 
+ if len(samples.Floats) == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } - slope, intercept := linearRegression(samples.Floats, enh.Ts) + slope, intercept := linearRegression(samples.Floats, enh.Ts) + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: slope*duration + intercept}), nil } diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index a00ed8a3ea..7bba44869e 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -258,7 +258,8 @@ load 5m http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 testcounter_reset_middle_mix 0+10x4 0+10x5 {{schema:0 sum:1 count:1}} {{schema:1 sum:2 count:2}} http_requests_mix{job="app-server", instance="1", group="canary"} 0+80x10 {{schema:0 sum:1 count:1}} - http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 + http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 + http_requests_inf{job="app-server", instance="1", group="canary"} -Inf 0+80x10 Inf # deriv should return the same as rate in simple cases. eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) @@ -271,15 +272,19 @@ eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job= eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 -# deriv should ignore histograms. -eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) +# deriv should ignore histograms with info annotation. 
+eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval instant at 100m deriv(testcounter_reset_middle_mix[110m]) +eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m]) {} 0.010606060606060607 eval instant at 50m deriv(http_requests_histogram[60m]) - #empty + #empty + +# deriv should return NaN in case of +Inf or -Inf found. +eval instant at 100m deriv(http_requests_inf[100m]) + {job="app-server", instance="1", group="canary"} NaN # predict_linear should return correct result. # X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000] @@ -316,6 +321,20 @@ eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3 eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 89.54545454545455 +# predict_linear should ignore histogram with info annotation. +eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) + {} 70 + +eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) + {} 70 + +eval instant at 60m predict_linear(http_requests_histogram[60m], 50m) + #empty + +# predict_linear should return NaN in case of +Inf or -Inf found. +eval instant at 100m predict_linear(http_requests_inf[100m], 6000) + {job="app-server", instance="1", group="canary"} NaN + # With http_requests_total, there is a sample value exactly at the end of # the range, and it has exactly the predicted value, so predict_linear # can be emulated with deriv. 
@@ -719,6 +738,11 @@ load 10s http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_mix{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 {{schema:0 count:1 sum:2}}x1000 + http_requests_mix{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 {{schema:0 count:1 sum:2}}x1000 + http_requests_mix{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 {{schema:0 count:1 sum:2}}x1000 + http_requests_mix{job="api-server", instance="1", group="canary"} 0+40x2000 {{schema:0 count:1 sum:2}}x1000 + http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000 eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 8000 @@ -726,6 +750,16 @@ eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="canary"} 24000 {job="api-server", instance="1", group="canary"} 32000 +# double_exponential_smoothing should ignore histogram with info annotation. 
+eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) + {job="api-server", instance="0", group="production"} 30100 + {job="api-server", instance="1", group="production"} 30200 + {job="api-server", instance="0", group="canary"} 80300 + {job="api-server", instance="1", group="canary"} 80000 + +eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1) + #empty + # negative trends clear load 10s diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 1b743f7057..5b2fde152b 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -148,6 +148,7 @@ var ( HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) + HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo) ) type annoErr struct { @@ -293,3 +294,10 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation), } } + +func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName), + } +} From 8d5236f9273fb0be9c55c84a9e089b59b51d8946 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Fri, 20 Dec 2024 04:22:26 +0530 Subject: [PATCH 1160/1397] PromQL: Adds tests for `delta` with histograms (#15674) PromQL: Adds 
tests for delta with histograms Signed-off-by: Neeraj Gartia -------- Signed-off-by: Neeraj Gartia --- promql/promqltest/testdata/functions.test | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 7bba44869e..fb84f8bd24 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -234,11 +234,25 @@ clear load 5m http_requests{path="/foo"} 0 50 100 150 200 http_requests{path="/bar"} 200 150 100 50 0 + http_requests_gauge{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}}+{{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}}x5 + http_requests_counter{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}}+{{schema:0 sum:1 count:2 buckets:[1 1 1]}}x5 + http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}} eval instant at 20m delta(http_requests[20m]) {path="/foo"} 200 {path="/bar"} -200 +eval instant at 20m delta(http_requests_gauge[20m]) + {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} + +# delta emits warn annotation for non-gauge histogram types. +eval_warn instant at 20m delta(http_requests_counter[20m]) + {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} + +# delta emits warn annotation for mix of histogram and floats. +eval_warn instant at 20m delta(http_requests_mix[20m]) + #empty + clear # Tests for idelta(). From f7373a1f91e63556ca3d0a8d21896b2cdace260c Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 19 Dec 2024 23:56:37 +0100 Subject: [PATCH 1161/1397] PromQL: improve some test comments This amends #15686. 
Signed-off-by: beorn7 --- promql/promqltest/testdata/functions.test | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index fb84f8bd24..6d2ade3abc 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -286,13 +286,14 @@ eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job= eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 -# deriv should ignore histograms with info annotation. +# deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m]) {} 0.010606060606060607 +# deriv should silently ignore ranges consisting only of histograms. eval instant at 50m deriv(http_requests_histogram[60m]) #empty @@ -335,13 +336,14 @@ eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3 eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 89.54545454545455 -# predict_linear should ignore histogram with info annotation. +# predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) {} 70 eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) {} 70 +# predict_linear should silently ignore ranges consisting only of histograms. 
eval instant at 60m predict_linear(http_requests_histogram[60m], 50m) #empty @@ -764,13 +766,14 @@ eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="canary"} 24000 {job="api-server", instance="1", group="canary"} 32000 -# double_exponential_smoothing should ignore histogram with info annotation. +# double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 30100 {job="api-server", instance="1", group="production"} 30200 {job="api-server", instance="0", group="canary"} 80300 {job="api-server", instance="1", group="canary"} 80000 +# double_exponential_smoothing should silently ignore ranges consisting only of histograms. eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1) #empty From 11e4673e0bbc7a824ecbb1b9e1b0211f0d3e6daa Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 28 Aug 2024 16:47:13 -0300 Subject: [PATCH 1162/1397] Support ingesting PWRv2's Created Timestamp as 0 samples Signed-off-by: Arthur Silva Sens --- storage/remote/codec_test.go | 26 +++++----- storage/remote/write_handler.go | 16 ++++++- storage/remote/write_handler_test.go | 71 ++++++++++++++++++++-------- 3 files changed, 80 insertions(+), 33 deletions(-) diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 6fb1ae1e49..3557a87eb5 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -104,9 +104,10 @@ var ( HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help. UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit. 
}, - Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, - Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))}, + Samples: []writev2.Sample{{Value: 1, Timestamp: 10}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(10, &testHistogram), writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil))}, + CreatedTimestamp: 1, // CT needs to be lower than the sample's timestamp. }, { LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first. @@ -116,9 +117,9 @@ var ( HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help. // No unit. }, - Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, - Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))}, + Samples: []writev2.Sample{{Value: 2, Timestamp: 20}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 20}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(30, &testHistogram), writev2.FromFloatHistogram(40, testHistogram.ToFloat(nil))}, }, }, } @@ -140,9 +141,10 @@ func TestWriteV2RequestFixture(t *testing.T) { HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help), UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit), }, - Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, - Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))}, + Samples: []writev2.Sample{{Value: 1, Timestamp: 10}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: 
exemplar1LabelRefs, Value: 1, Timestamp: 10}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(10, &testHistogram), writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil))}, + CreatedTimestamp: 1, }, { LabelsRefs: labelRefs, @@ -151,9 +153,9 @@ func TestWriteV2RequestFixture(t *testing.T) { HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help), // No unit. }, - Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, - Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))}, + Samples: []writev2.Sample{{Value: 2, Timestamp: 20}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 20}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(30, &testHistogram), writev2.FromFloatHistogram(40, testHistogram.ToFloat(nil))}, }, }, Symbols: st.Symbols(), diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index afb50ef265..b372bd4fa8 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -394,7 +394,21 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * var ref storage.SeriesRef // Samples. - for _, s := range ts.Samples { + for i, s := range ts.Samples { + // CT only needs to be ingested for the first sample, it will be considered + // out of order for the rest. + if i == 0 && ts.CreatedTimestamp != 0 { + ref, err = app.AppendCTZeroSample(ref, ls, s.Timestamp, ts.CreatedTimestamp) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario. + // We don't fail the request and just log it. 
+ level.Debug(h.logger).Log("msg", "Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", s.Timestamp) + } + if err == nil { + rs.Samples++ + } + } + ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { rs.Samples++ diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index c40f227ea8..be4a970c23 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -310,11 +310,12 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { expectedCode int expectedRespBody string - commitErr error - appendSampleErr error - appendHistogramErr error - appendExemplarErr error - updateMetadataErr error + commitErr error + appendSampleErr error + appendCTZeroSampleErr error + appendHistogramErr error + appendExemplarErr error + updateMetadataErr error }{ { desc: "All timeseries accepted", @@ -440,11 +441,12 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) appendable := &mockAppendable{ - commitErr: tc.commitErr, - appendSampleErr: tc.appendSampleErr, - appendHistogramErr: tc.appendHistogramErr, - appendExemplarErr: tc.appendExemplarErr, - updateMetadataErr: tc.updateMetadataErr, + commitErr: tc.commitErr, + appendSampleErr: tc.appendSampleErr, + appendCTZeroSampleErr: tc.appendCTZeroSampleErr, + appendHistogramErr: tc.appendHistogramErr, + appendExemplarErr: tc.appendExemplarErr, + updateMetadataErr: tc.updateMetadataErr, } handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) @@ -472,7 +474,8 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { // Double check mandatory 2.0 stats. // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each. 
- expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader)) + // We expect 3 samples because the first series has regular sample + CT. + expectHeaderValue(t, 3, resp.Header.Get(rw20WrittenSamplesHeader)) expectHeaderValue(t, 4, resp.Header.Get(rw20WrittenHistogramsHeader)) if tc.appendExemplarErr != nil { expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader)) @@ -489,6 +492,10 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) for _, s := range ts.Samples { + if ts.CreatedTimestamp != 0 { + requireEqual(t, mockSample{ls, ts.CreatedTimestamp, 0}, appendable.samples[i]) + i++ + } requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) i++ } @@ -779,11 +786,12 @@ type mockAppendable struct { metadata []mockMetadata // optional errors to inject. - commitErr error - appendSampleErr error - appendHistogramErr error - appendExemplarErr error - updateMetadataErr error + commitErr error + appendSampleErr error + appendCTZeroSampleErr error + appendHistogramErr error + appendExemplarErr error + updateMetadataErr error } type mockSample struct { @@ -936,9 +944,32 @@ func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp return 0, nil } -func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { - // AppendCTZeroSample is no-op for remote-write for now. - // TODO(bwplotka): Add support for PRW 2.0 for CT zero feature (but also we might - // replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218). +func (m *mockAppendable) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if m.appendCTZeroSampleErr != nil { + return 0, m.appendCTZeroSampleErr + } + + // Created Timestamp can't be higher than the original sample's timestamp. 
+ if ct > t { + return 0, storage.ErrOutOfOrderSample + } + + latestTs := m.latestSample[l.Hash()] + if ct < latestTs { + return 0, storage.ErrOutOfOrderSample + } + if ct == latestTs { + return 0, storage.ErrDuplicateSampleForTimestamp + } + + if l.IsEmpty() { + return 0, tsdb.ErrInvalidSample + } + if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates { + return 0, tsdb.ErrInvalidSample + } + + m.latestSample[l.Hash()] = ct + m.samples = append(m.samples, mockSample{l, ct, 0}) return 0, nil } From 3b97a6397ccbe44bbde126aab3f543f368413294 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 28 Aug 2024 16:58:46 -0300 Subject: [PATCH 1163/1397] Put PRWv2 created timestamp ingestion behing feature-flag Signed-off-by: Arthur Silva Sens --- cmd/prometheus/main.go | 1 + storage/remote/write_handler.go | 44 ++++++++++++++++++++-------- storage/remote/write_handler_test.go | 41 ++++++++++++++++---------- web/api/v1/api.go | 3 +- web/api/v1/errors_test.go | 1 + web/web.go | 2 ++ 6 files changed, 63 insertions(+), 29 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index cd0775bbdd..d2a69634aa 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -259,6 +259,7 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true") case "created-timestamp-zero-ingestion": c.scrape.EnableCreatedTimestampZeroIngestion = true + c.web.CTZeroIngestionEnabled = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. 
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index b372bd4fa8..79a627a53b 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -48,6 +48,8 @@ type writeHandler struct { samplesAppendedWithoutMetadata prometheus.Counter acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{} + + ingestCTZeroSample bool } const maxAheadTime = 10 * time.Minute @@ -57,7 +59,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. -func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg, ingestCTZeroSample bool) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -78,6 +80,8 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable Name: "remote_write_without_metadata_appended_samples_total", Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.", }), + + ingestCTZeroSample: ingestCTZeroSample, } return h } @@ -393,22 +397,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * allSamplesSoFar := rs.AllSamples() var ref storage.SeriesRef - // Samples. - for i, s := range ts.Samples { + if h.ingestCTZeroSample { // CT only needs to be ingested for the first sample, it will be considered // out of order for the rest. 
- if i == 0 && ts.CreatedTimestamp != 0 { - ref, err = app.AppendCTZeroSample(ref, ls, s.Timestamp, ts.CreatedTimestamp) - if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { - // Even for the first sample OOO is a common scenario. - // We don't fail the request and just log it. - level.Debug(h.logger).Log("msg", "Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", s.Timestamp) - } - if err == nil { - rs.Samples++ - } + ref, err = h.handleCTZeroSample(app, ref, ls, ts.Samples[0], ts.CreatedTimestamp, rs) + if err != nil { + h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp) } + } + // Samples. + for _, s := range ts.Samples { ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { rs.Samples++ @@ -493,6 +492,25 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...) } +// handleCTZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. +// It doens't return errors in case of out of order CT. +func (h *writeHandler) handleCTZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, sample writev2.Sample, ct int64, rs *WriteResponseStats) (storage.SeriesRef, error) { + var err error + if sample.Timestamp != 0 && ct != 0 { + ref, err = app.AppendCTZeroSample(ref, l, sample.Timestamp, ct) + if err == nil { + rs.Samples++ + } + if err != nil && errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. 
+ err = nil + } + } + return ref, err +} + // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index be4a970c23..035d35a839 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -130,7 +130,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) { } appendable := &mockAppendable{} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -231,7 +231,7 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) { } appendable := &mockAppendable{} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -256,7 +256,7 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) { // in Prometheus, so keeping like this to not break existing 1.0 clients. 
appendable := &mockAppendable{} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -316,9 +316,17 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { appendHistogramErr error appendExemplarErr error updateMetadataErr error + + ingestCTZeroSample bool }{ { - desc: "All timeseries accepted", + desc: "All timeseries accepted/ct_enabled", + input: writeV2RequestFixture.Timeseries, + expectedCode: http.StatusNoContent, + ingestCTZeroSample: true, + }, + { + desc: "All timeseries accepted/ct_disabled", input: writeV2RequestFixture.Timeseries, expectedCode: http.StatusNoContent, }, @@ -448,7 +456,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { appendExemplarErr: tc.appendExemplarErr, updateMetadataErr: tc.updateMetadataErr, } - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}, tc.ingestCTZeroSample) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -474,8 +482,11 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { // Double check mandatory 2.0 stats. // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each. - // We expect 3 samples because the first series has regular sample + CT. 
- expectHeaderValue(t, 3, resp.Header.Get(rw20WrittenSamplesHeader)) + if tc.ingestCTZeroSample { + expectHeaderValue(t, 3, resp.Header.Get(rw20WrittenSamplesHeader)) + } else { + expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader)) + } expectHeaderValue(t, 4, resp.Header.Get(rw20WrittenHistogramsHeader)) if tc.appendExemplarErr != nil { expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader)) @@ -492,7 +503,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) for _, s := range ts.Samples { - if ts.CreatedTimestamp != 0 { + if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample { requireEqual(t, mockSample{ls, ts.CreatedTimestamp, 0}, appendable.samples[i]) i++ } @@ -552,7 +563,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -594,7 +605,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -632,7 +643,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) { require.NoError(t, err) appendable := 
&mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -663,7 +674,7 @@ func BenchmarkRemoteWriteHandler(b *testing.B) { appendable := &mockAppendable{} // TODO: test with other proto format(s) - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() b.ResetTimer() @@ -680,7 +691,7 @@ func TestCommitErr_V1Message(t *testing.T) { require.NoError(t, err) appendable := &mockAppendable{commitErr: errors.New("commit error")} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -704,7 +715,7 @@ func TestCommitErr_V2Message(t *testing.T) { req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) appendable := &mockAppendable{commitErr: errors.New("commit error")} - handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}, false) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -731,7 +742,7 @@ func 
BenchmarkRemoteWriteOOOSamples(b *testing.B) { require.NoError(b, db.Close()) }) // TODO: test with other proto format(s) - handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) + handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false) buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy") require.NoError(b, err) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 392dfc6aab..e70da5b4b7 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -258,6 +258,7 @@ func NewAPI( rwEnabled bool, acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg, otlpEnabled bool, + ctZeroIngestionEnabled bool, ) *API { a := &API{ QueryEngine: qe, @@ -301,7 +302,7 @@ func NewAPI( } if rwEnabled { - a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs) + a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled) } if otlpEnabled { a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc) diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index f5e75615ec..0a5c76b48e 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -142,6 +142,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route false, config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}, false, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/web/web.go b/web/web.go index 08c683bae8..b5532cadff 100644 --- a/web/web.go +++ b/web/web.go @@ -290,6 +290,7 @@ type Options struct { EnableRemoteWriteReceiver bool EnableOTLPWriteReceiver bool IsAgent bool + CTZeroIngestionEnabled bool AppName string AcceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg @@ -386,6 
+387,7 @@ func New(logger *slog.Logger, o *Options) *Handler { o.EnableRemoteWriteReceiver, o.AcceptRemoteWriteProtoMsgs, o.EnableOTLPWriteReceiver, + o.CTZeroIngestionEnabled, ) if o.RoutePrefix != "/" { From 6571d97e704564596c065e78052f1adb24ca6c76 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 4 Dec 2024 17:15:13 -0300 Subject: [PATCH 1164/1397] Handle histogram's created timestamp Signed-off-by: Arthur Silva Sens --- storage/remote/write_handler.go | 36 ++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 79a627a53b..53571e74a0 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -397,7 +397,8 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * allSamplesSoFar := rs.AllSamples() var ref storage.SeriesRef - if h.ingestCTZeroSample { + // Samples. + if h.ingestCTZeroSample && len(ts.Samples) > 0 { // CT only needs to be ingested for the first sample, it will be considered // out of order for the rest. ref, err = h.handleCTZeroSample(app, ref, ls, ts.Samples[0], ts.CreatedTimestamp, rs) @@ -405,8 +406,6 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp) } } - - // Samples. for _, s := range ts.Samples { ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { @@ -427,6 +426,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } // Native Histograms. + if h.ingestCTZeroSample && len(ts.Histograms) > 0 { + // CT only needs to be ingested for the first histogram, it will be considered + // out of order for the rest. 
+ ref, err = h.handleHistogramZeroSample(app, ref, ls, ts.Histograms[0], ts.CreatedTimestamp, rs) + if err != nil { + h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Histograms[0].Timestamp) + } + } for _, hp := range ts.Histograms { if hp.IsFloatHistogram() { ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram()) @@ -511,6 +518,29 @@ func (h *writeHandler) handleCTZeroSample(app storage.Appender, ref storage.Seri return ref, err } +// handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. +// It doens't return errors in case of out of order CT. +func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64, rs *WriteResponseStats) (storage.SeriesRef, error) { + var err error + if hist.Timestamp != 0 && ct != 0 { + if hist.IsFloatHistogram() { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram()) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil) + } + if err == nil { + rs.Histograms++ + } + if err != nil && errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. + err = nil + } + } + return ref, err +} + // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. 
func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { From 3ffc3bf6a347220f4605a35ef63abe9dbc95a31c Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 4 Dec 2024 18:03:35 -0300 Subject: [PATCH 1165/1397] handle histogram CT Signed-off-by: Arthur Silva Sens --- storage/remote/write_handler.go | 28 +++++------ storage/remote/write_handler_test.go | 72 ++++++++++++++++++++++++---- 2 files changed, 73 insertions(+), 27 deletions(-) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 53571e74a0..cdf141095e 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -401,7 +401,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * if h.ingestCTZeroSample && len(ts.Samples) > 0 { // CT only needs to be ingested for the first sample, it will be considered // out of order for the rest. - ref, err = h.handleCTZeroSample(app, ref, ls, ts.Samples[0], ts.CreatedTimestamp, rs) + ref, err = h.handleCTZeroSample(app, ref, ls, ts.Samples[0], ts.CreatedTimestamp) if err != nil { h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp) } @@ -426,15 +426,15 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } // Native Histograms. - if h.ingestCTZeroSample && len(ts.Histograms) > 0 { - // CT only needs to be ingested for the first histogram, it will be considered - // out of order for the rest. 
- ref, err = h.handleHistogramZeroSample(app, ref, ls, ts.Histograms[0], ts.CreatedTimestamp, rs) - if err != nil { - h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Histograms[0].Timestamp) - } - } for _, hp := range ts.Histograms { + if h.ingestCTZeroSample { + // Differently from samples, we need to handle CT for each histogram instead of just the first one. + // This is because histograms and float histograms are stored separately, even if they have the same labels. + ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, ts.CreatedTimestamp) + if err != nil { + h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", hp.Timestamp) + } + } if hp.IsFloatHistogram() { ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram()) } else { @@ -501,13 +501,10 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // handleCTZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. // It doens't return errors in case of out of order CT. -func (h *writeHandler) handleCTZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, sample writev2.Sample, ct int64, rs *WriteResponseStats) (storage.SeriesRef, error) { +func (h *writeHandler) handleCTZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, sample writev2.Sample, ct int64) (storage.SeriesRef, error) { var err error if sample.Timestamp != 0 && ct != 0 { ref, err = app.AppendCTZeroSample(ref, l, sample.Timestamp, ct) - if err == nil { - rs.Samples++ - } if err != nil && errors.Is(err, storage.ErrOutOfOrderCT) { // Even for the first sample OOO is a common scenario because // we can't tell if a CT was already ingested in a previous request. 
@@ -520,7 +517,7 @@ func (h *writeHandler) handleCTZeroSample(app storage.Appender, ref storage.Seri // handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. // It doens't return errors in case of out of order CT. -func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64, rs *WriteResponseStats) (storage.SeriesRef, error) { +func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { var err error if hist.Timestamp != 0 && ct != 0 { if hist.IsFloatHistogram() { @@ -528,9 +525,6 @@ func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref stora } else { ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil) } - if err == nil { - rs.Histograms++ - } if err != nil && errors.Is(err, storage.ErrOutOfOrderCT) { // Even for the first sample OOO is a common scenario because // we can't tell if a CT was already ingested in a previous request. diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 035d35a839..084db9fcd8 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -482,11 +482,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { // Double check mandatory 2.0 stats. // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each. 
- if tc.ingestCTZeroSample { - expectHeaderValue(t, 3, resp.Header.Get(rw20WrittenSamplesHeader)) - } else { - expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader)) - } + expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader)) expectHeaderValue(t, 4, resp.Header.Get(rw20WrittenHistogramsHeader)) if tc.appendExemplarErr != nil { expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader)) @@ -513,9 +509,17 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { for _, hp := range ts.Histograms { if hp.IsFloatHistogram() { fh := hp.ToFloatHistogram() + if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample { + requireEqual(t, mockHistogram{ls, ts.CreatedTimestamp, nil, &histogram.FloatHistogram{}}, appendable.histograms[k]) + k++ + } requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k]) } else { h := hp.ToIntHistogram() + if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample { + requireEqual(t, mockHistogram{ls, ts.CreatedTimestamp, &histogram.Histogram{}, nil}, appendable.histograms[k]) + k++ + } requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k]) } k++ @@ -532,6 +536,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m]) m++ } + } }) } @@ -793,6 +798,7 @@ type mockAppendable struct { latestExemplar map[uint64]int64 exemplars []mockExemplar latestHistogram map[uint64]int64 + latestFloatHist map[uint64]int64 histograms []mockHistogram metadata []mockMetadata @@ -846,6 +852,9 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender { if m.latestHistogram == nil { m.latestHistogram = map[uint64]int64{} } + if m.latestFloatHist == nil { + m.latestFloatHist = map[uint64]int64{} + } if m.latestExemplar == nil { m.latestExemplar = map[uint64]int64{} } @@ -919,7 +928,12 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t return 0, m.appendHistogramErr } - 
latestTs := m.latestHistogram[l.Hash()] + var latestTs int64 + if h != nil { + latestTs = m.latestHistogram[l.Hash()] + } else { + latestTs = m.latestFloatHist[l.Hash()] + } if t < latestTs { return 0, storage.ErrOutOfOrderSample } @@ -934,15 +948,53 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t return 0, tsdb.ErrInvalidSample } - m.latestHistogram[l.Hash()] = t + if h != nil { + m.latestHistogram[l.Hash()] = t + } else { + m.latestFloatHist[l.Hash()] = t + } m.histograms = append(m.histograms, mockHistogram{l, t, h, fh}) return 0, nil } func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - // AppendCTZeroSample is no-op for remote-write for now. - // TODO(bwplotka/arthursens): Add support for PRW 2.0 for CT zero feature (but also we might - // replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218). + if m.appendCTZeroSampleErr != nil { + return 0, m.appendCTZeroSampleErr + } + + // Created Timestamp can't be higher than the original sample's timestamp. 
+ if ct > t { + return 0, storage.ErrOutOfOrderSample + } + + var latestTs int64 + if h != nil { + latestTs = m.latestHistogram[l.Hash()] + } else { + latestTs = m.latestFloatHist[l.Hash()] + } + if ct < latestTs { + return 0, storage.ErrOutOfOrderSample + } + if ct == latestTs { + return 0, storage.ErrDuplicateSampleForTimestamp + } + + if l.IsEmpty() { + return 0, tsdb.ErrInvalidSample + } + + if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates { + return 0, tsdb.ErrInvalidSample + } + + if h != nil { + m.latestHistogram[l.Hash()] = ct + m.histograms = append(m.histograms, mockHistogram{l, ct, &histogram.Histogram{}, nil}) + } else { + m.latestFloatHist[l.Hash()] = ct + m.histograms = append(m.histograms, mockHistogram{l, ct, nil, &histogram.FloatHistogram{}}) + } return 0, nil } From 3380809b682ba65faaf6cb4ff48ddf543903efbe Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 4 Dec 2024 18:05:59 -0300 Subject: [PATCH 1166/1397] fix linter Signed-off-by: Arthur Silva Sens --- storage/remote/write_handler_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 084db9fcd8..b37b3632b9 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -536,7 +536,6 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m]) m++ } - } }) } From b7a5e280dfa15c9b9acef6c0f228ad17e708e1ff Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Wed, 18 Dec 2024 13:44:49 -0300 Subject: [PATCH 1167/1397] Inline conditionals and CT handling Signed-off-by: Arthur Silva Sens --- storage/remote/write_handler.go | 48 +++++++++++---------------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index cdf141095e..89433ae6f2 100644 --- a/storage/remote/write_handler.go +++ 
b/storage/remote/write_handler.go @@ -398,11 +398,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * var ref storage.SeriesRef // Samples. - if h.ingestCTZeroSample && len(ts.Samples) > 0 { + if h.ingestCTZeroSample && len(ts.Samples) > 0 && ts.Samples[0].Timestamp != 0 && ts.CreatedTimestamp != 0 { // CT only needs to be ingested for the first sample, it will be considered // out of order for the rest. - ref, err = h.handleCTZeroSample(app, ref, ls, ts.Samples[0], ts.CreatedTimestamp) - if err != nil { + ref, err = app.AppendCTZeroSample(ref, ls, ts.Samples[0].Timestamp, ts.CreatedTimestamp) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp) } } @@ -427,11 +430,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Native Histograms. for _, hp := range ts.Histograms { - if h.ingestCTZeroSample { + if h.ingestCTZeroSample && hp.Timestamp != 0 && ts.CreatedTimestamp != 0 { // Differently from samples, we need to handle CT for each histogram instead of just the first one. // This is because histograms and float histograms are stored separately, even if they have the same labels. ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, ts.CreatedTimestamp) - if err != nil { + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. 
h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", hp.Timestamp) } } @@ -499,38 +505,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...) } -// handleCTZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. -// It doens't return errors in case of out of order CT. -func (h *writeHandler) handleCTZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, sample writev2.Sample, ct int64) (storage.SeriesRef, error) { - var err error - if sample.Timestamp != 0 && ct != 0 { - ref, err = app.AppendCTZeroSample(ref, l, sample.Timestamp, ct) - if err != nil && errors.Is(err, storage.ErrOutOfOrderCT) { - // Even for the first sample OOO is a common scenario because - // we can't tell if a CT was already ingested in a previous request. - // We ignore the error. - err = nil - } - } - return ref, err -} - // handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. // It doens't return errors in case of out of order CT. func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { var err error - if hist.Timestamp != 0 && ct != 0 { - if hist.IsFloatHistogram() { - ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram()) - } else { - ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil) - } - if err != nil && errors.Is(err, storage.ErrOutOfOrderCT) { - // Even for the first sample OOO is a common scenario because - // we can't tell if a CT was already ingested in a previous request. - // We ignore the error. 
- err = nil - } + if hist.IsFloatHistogram() { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram()) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil) } return ref, err } From e630ffdbedaf276da8c7e1a015af893ddc16d77a Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 26 Jul 2024 18:17:35 +0100 Subject: [PATCH 1168/1397] TSDB: extend BenchmarkMemPostings_PostingsForLabelMatching to check merge speed We need to create more postings entries so the merger has some work to do. Not material for the regexp ones as they match so few series. Signed-off-by: Bryan Boreham --- tsdb/index/postings_test.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 6dd9f25bc0..c4fb1f12f4 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1410,12 +1410,15 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) { slowRegexp := "^" + slowRegexpString() + "$" b.Logf("Slow regexp length = %d", len(slowRegexp)) slow := regexp.MustCompile(slowRegexp) + const seriesPerLabel = 10 for _, labelValueCount := range []int{1_000, 10_000, 100_000} { b.Run(fmt.Sprintf("labels=%d", labelValueCount), func(b *testing.B) { mp := NewMemPostings() for i := 0; i < labelValueCount; i++ { - mp.Add(storage.SeriesRef(i), labels.FromStrings("label", strconv.Itoa(i))) + for j := 0; j < seriesPerLabel; j++ { + mp.Add(storage.SeriesRef(i*seriesPerLabel+j), labels.FromStrings("__name__", strconv.Itoa(j), "label", strconv.Itoa(i))) + } } fp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString)) @@ -1435,6 +1438,18 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) { mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString).Next() } }) + + b.Run("matcher=all", func(b *testing.B) { + for i := 
0; i < b.N; i++ { + // Match everything. + p := mp.PostingsForLabelMatching(context.Background(), "label", func(_ string) bool { return true }) + var sum storage.SeriesRef + // Iterate through all results to exercise merge function. + for p.Next() { + sum += p.At() + } + } + }) }) } } From 1b22242024d09a287a30ad30560dfc58febee966 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 26 Jul 2024 20:00:03 +0100 Subject: [PATCH 1169/1397] TSDB BenchmarkMerge: run fewer sizes As long as we run small and big sizes, we don't need all the sizes inbetween. Signed-off-by: Bryan Boreham --- tsdb/index/postings_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index c4fb1f12f4..cd7fa9d3c6 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -393,7 +393,7 @@ func BenchmarkMerge(t *testing.B) { } its := make([]Postings, len(refs)) - for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} { + for _, nSeries := range []int{1, 10, 10000, 100000} { t.Run(strconv.Itoa(nSeries), func(bench *testing.B) { ctx := context.Background() for i := 0; i < bench.N; i++ { From 0a8779f46dace4d24dd9c14e81cba065c23e2a88 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 26 Jul 2024 19:35:58 +0100 Subject: [PATCH 1170/1397] TSDB: Make mergedPostings generic Now we can call it with more specific types which is more efficient than making everything go through the `Postings` interface. Benchmark the concrete type. Signed-off-by: Bryan Boreham --- tsdb/index/postings.go | 20 ++++++++++---------- tsdb/index/postings_test.go | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index a2c5a82239..5384133832 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -660,7 +660,7 @@ func (it *intersectPostings) Err() error { } // Merge returns a new iterator over the union of the input iterators. 
-func Merge(_ context.Context, its ...Postings) Postings { +func Merge[T Postings](_ context.Context, its ...T) Postings { if len(its) == 0 { return EmptyPostings() } @@ -675,19 +675,19 @@ func Merge(_ context.Context, its ...Postings) Postings { return p } -type mergedPostings struct { - p []Postings - h *loser.Tree[storage.SeriesRef, Postings] +type mergedPostings[T Postings] struct { + p []T + h *loser.Tree[storage.SeriesRef, T] cur storage.SeriesRef } -func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { +func newMergedPostings[T Postings](p []T) (m *mergedPostings[T], nonEmpty bool) { const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree. lt := loser.New(p, maxVal) - return &mergedPostings{p: p, h: lt}, true + return &mergedPostings[T]{p: p, h: lt}, true } -func (it *mergedPostings) Next() bool { +func (it *mergedPostings[T]) Next() bool { for { if !it.h.Next() { return false @@ -701,7 +701,7 @@ func (it *mergedPostings) Next() bool { } } -func (it *mergedPostings) Seek(id storage.SeriesRef) bool { +func (it *mergedPostings[T]) Seek(id storage.SeriesRef) bool { for !it.h.IsEmpty() && it.h.At() < id { finished := !it.h.Winner().Seek(id) it.h.Fix(finished) @@ -713,11 +713,11 @@ func (it *mergedPostings) Seek(id storage.SeriesRef) bool { return true } -func (it mergedPostings) At() storage.SeriesRef { +func (it mergedPostings[T]) At() storage.SeriesRef { return it.cur } -func (it mergedPostings) Err() error { +func (it mergedPostings[T]) Err() error { for _, p := range it.p { if err := p.Err(); err != nil { return err diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index cd7fa9d3c6..77d59ec995 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -392,7 +392,7 @@ func BenchmarkMerge(t *testing.B) { refs = append(refs, temp) } - its := make([]Postings, len(refs)) + its := make([]*ListPostings, len(refs)) for _, nSeries := range []int{1, 
10, 10000, 100000} { t.Run(strconv.Itoa(nSeries), func(bench *testing.B) { ctx := context.Background() From cfa32f3d2847eb9d40ebc16ce9bb8ebfeb0705a1 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 26 Jul 2024 20:08:51 +0100 Subject: [PATCH 1171/1397] TSDB: Move merge of head postings into index This enables it to take advantage of a more compact data structure since all postings are known to be `*ListPostings`. Remove the `Get` member which was not used for anything else, and fix up tests. Signed-off-by: Bryan Boreham --- tsdb/head_read.go | 15 +-------------- tsdb/head_test.go | 12 ++++++------ tsdb/index/postings.go | 38 ++++++++++++++++++------------------- tsdb/index/postings_test.go | 8 ++++---- 4 files changed, 30 insertions(+), 43 deletions(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 79ed0f0240..b95257c28a 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -103,20 +103,7 @@ func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma // Postings returns the postings list iterator for the label pairs. 
func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { - switch len(values) { - case 0: - return index.EmptyPostings(), nil - case 1: - return h.head.postings.Get(name, values[0]), nil - default: - res := make([]index.Postings, 0, len(values)) - for _, value := range values { - if p := h.head.postings.Get(name, value); !index.IsEmptyPostingsType(p) { - res = append(res, p) - } - } - return index.Merge(ctx, res...), nil - } + return h.head.postings.Postings(ctx, name, values...), nil } func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index fb158b593c..e3742cbe9c 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -962,12 +962,12 @@ func TestHead_Truncate(t *testing.T) { require.Nil(t, h.series.getByID(s3.ref)) require.Nil(t, h.series.getByID(s4.ref)) - postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1")) - postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2")) - postingsB1, _ := index.ExpandPostings(h.postings.Get("b", "1")) - postingsB2, _ := index.ExpandPostings(h.postings.Get("b", "2")) - postingsC1, _ := index.ExpandPostings(h.postings.Get("c", "1")) - postingsAll, _ := index.ExpandPostings(h.postings.Get("", "")) + postingsA1, _ := index.ExpandPostings(h.postings.Postings(ctx, "a", "1")) + postingsA2, _ := index.ExpandPostings(h.postings.Postings(ctx, "a", "2")) + postingsB1, _ := index.ExpandPostings(h.postings.Postings(ctx, "b", "1")) + postingsB2, _ := index.ExpandPostings(h.postings.Postings(ctx, "b", "2")) + postingsC1, _ := index.ExpandPostings(h.postings.Postings(ctx, "c", "1")) + postingsAll, _ := index.ExpandPostings(h.postings.Postings(ctx, "", "")) require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref)}, postingsA1) require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s2.ref)}, postingsA2) diff --git a/tsdb/index/postings.go 
b/tsdb/index/postings.go index 5384133832..03e3f7a239 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -235,25 +235,9 @@ func (p *MemPostings) Stats(label string, limit int, labelSizeFunc func(string, } } -// Get returns a postings list for the given label pair. -func (p *MemPostings) Get(name, value string) Postings { - var lp []storage.SeriesRef - p.mtx.RLock() - l := p.m[name] - if l != nil { - lp = l[value] - } - p.mtx.RUnlock() - - if lp == nil { - return EmptyPostings() - } - return newListPostings(lp...) -} - // All returns a postings list over all documents ever added. func (p *MemPostings) All() Postings { - return p.Get(AllPostingsKey()) + return p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value) } // EnsureOrder ensures that all postings lists are sorted. After it returns all further @@ -490,7 +474,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, } // Now `vals` only contains the values that matched, get their postings. - its := make([]Postings, 0, len(vals)) + its := make([]*ListPostings, 0, len(vals)) lps := make([]ListPostings, len(vals)) p.mtx.RLock() e := p.m[name] @@ -510,11 +494,27 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, return Merge(ctx, its...) } +// Postings returns a postings iterator for the given label values. +func (p *MemPostings) Postings(ctx context.Context, name string, values ...string) Postings { + res := make([]*ListPostings, 0, len(values)) + lps := make([]ListPostings, len(values)) + p.mtx.RLock() + postingsMapForName := p.m[name] + for i, value := range values { + if lp := postingsMapForName[value]; lp != nil { + lps[i] = ListPostings{list: lp} + res = append(res, &lps[i]) + } + } + p.mtx.RUnlock() + return Merge(ctx, res...) 
+} + func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string) Postings { p.mtx.RLock() e := p.m[name] - its := make([]Postings, 0, len(e)) + its := make([]*ListPostings, 0, len(e)) lps := make([]ListPostings, len(e)) i := 0 for _, refs := range e { diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 77d59ec995..cf5ab6c0f8 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -979,7 +979,7 @@ func TestMemPostings_Delete(t *testing.T) { p.Add(2, labels.FromStrings("lbl1", "b")) p.Add(3, labels.FromStrings("lbl2", "a")) - before := p.Get(allPostingsKey.Name, allPostingsKey.Value) + before := p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value) deletedRefs := map[storage.SeriesRef]struct{}{ 2: {}, } @@ -987,7 +987,7 @@ func TestMemPostings_Delete(t *testing.T) { {Name: "lbl1", Value: "b"}: {}, } p.Delete(deletedRefs, affectedLabels) - after := p.Get(allPostingsKey.Name, allPostingsKey.Value) + after := p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value) // Make sure postings gotten before the delete have the old data when // iterated over. @@ -1001,7 +1001,7 @@ func TestMemPostings_Delete(t *testing.T) { require.NoError(t, err) require.Equal(t, []storage.SeriesRef{1, 3}, expanded) - deleted := p.Get("lbl1", "b") + deleted := p.Postings(context.Background(), "lbl1", "b") expanded, err = ExpandPostings(deleted) require.NoError(t, err) require.Empty(t, expanded, "expected empty postings, got %v", expanded) @@ -1073,7 +1073,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { return default: // Get a random value of this label. 
- p.Get(lbl, itoa(rand.Intn(10000))).Next() + p.Postings(context.Background(), lbl, itoa(rand.Intn(10000))).Next() } } }(i) From 7b03796d0f979ea181092f24b61d6cdc1bc89ac9 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Sat, 21 Dec 2024 13:33:08 +0000 Subject: [PATCH 1172/1397] Scraping: stop storing discovered labels (#15261) Instead of storing discovered labels on every target, recompute them if required. The `Target` struct now needs to hold some more data required to recompute them, such as ScrapeConfig. This moves the load from every Prometheus all of the time, to just when someone views Service Discovery in the UI. The way `PopulateLabels` is used changes; you are no longer expected to call it with a part-populated `labels.Builder`. The signature of `Target.Labels` changes to take a `labels.Builder` instead of a `ScratchBuilder`, for consistency with `DiscoveredLabels`. This will save a lot of work when many targets are filtered out in relabeling. Combine with `keep_dropped_targets` to avoid ever computing most labels for dropped targets. 
Signed-off-by: Bryan Boreham --- cmd/promtool/sd.go | 4 +- scrape/manager_test.go | 70 +++++++++++++------------- scrape/scrape.go | 8 +-- scrape/scrape_test.go | 33 +++++++++---- scrape/target.go | 110 ++++++++++++++++++++++------------------- scrape/target_test.go | 16 +++--- web/api/v1/api.go | 12 ++--- web/api/v1/api_test.go | 44 ++++++++--------- 8 files changed, 162 insertions(+), 135 deletions(-) diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index 5e005bca8b..8863fbeac0 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -144,7 +144,9 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc } } - res, orig, err := scrape.PopulateLabels(lb, scrapeConfig) + scrape.PopulateDiscoveredLabels(lb, scrapeConfig, target, targetGroup.Labels) + orig := lb.Labels() + res, err := scrape.PopulateLabels(lb, scrapeConfig, target, targetGroup.Labels) result := sdCheckResult{ DiscoveredLabels: orig, Labels: res, diff --git a/scrape/manager_test.go b/scrape/manager_test.go index b9c6f4c40e..75ac9ea692 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "maps" "net/http" "net/http/httptest" "net/url" @@ -61,7 +62,7 @@ func init() { func TestPopulateLabels(t *testing.T) { cases := []struct { - in labels.Labels + in model.LabelSet cfg *config.ScrapeConfig res labels.Labels resOrig labels.Labels @@ -69,10 +70,10 @@ func TestPopulateLabels(t *testing.T) { }{ // Regular population of scrape config options. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", "custom": "value", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -103,14 +104,14 @@ func TestPopulateLabels(t *testing.T) { // Pre-define/overwrite scrape config labels. // Leave out port and expect it to be defaulted to scheme. 
{ - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4", model.SchemeLabel: "http", model.MetricsPathLabel: "/custom", model.JobLabel: "custom-job", model.ScrapeIntervalLabel: "2s", model.ScrapeTimeoutLabel: "2s", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -138,10 +139,10 @@ func TestPopulateLabels(t *testing.T) { }, // Provide instance label. HTTPS port default for IPv6. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "[::1]", model.InstanceLabel: "custom-instance", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -170,7 +171,7 @@ func TestPopulateLabels(t *testing.T) { }, // Address label missing. { - in: labels.FromStrings("custom", "value"), + in: model.LabelSet{"custom": "value"}, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -184,7 +185,7 @@ func TestPopulateLabels(t *testing.T) { }, // Address label missing, but added in relabelling. { - in: labels.FromStrings("custom", "host:1234"), + in: model.LabelSet{"custom": "host:1234"}, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -222,7 +223,7 @@ func TestPopulateLabels(t *testing.T) { }, // Address label missing, but added in relabelling. { - in: labels.FromStrings("custom", "host:1234"), + in: model.LabelSet{"custom": "host:1234"}, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -260,10 +261,10 @@ func TestPopulateLabels(t *testing.T) { }, // Invalid UTF-8 in label. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", "custom": "\xbd", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -277,10 +278,10 @@ func TestPopulateLabels(t *testing.T) { }, // Invalid duration in interval label. 
{ - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", model.ScrapeIntervalLabel: "2notseconds", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -294,10 +295,10 @@ func TestPopulateLabels(t *testing.T) { }, // Invalid duration in timeout label. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", model.ScrapeTimeoutLabel: "2notseconds", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -311,10 +312,10 @@ func TestPopulateLabels(t *testing.T) { }, // 0 interval in timeout label. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", model.ScrapeIntervalLabel: "0s", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -328,10 +329,10 @@ func TestPopulateLabels(t *testing.T) { }, // 0 duration in timeout label. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", model.ScrapeTimeoutLabel: "0s", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -345,11 +346,11 @@ func TestPopulateLabels(t *testing.T) { }, // Timeout less than interval. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:1000", model.ScrapeIntervalLabel: "1s", model.ScrapeTimeoutLabel: "2s", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -363,9 +364,9 @@ func TestPopulateLabels(t *testing.T) { }, // Don't attach default port. { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -393,9 +394,9 @@ func TestPopulateLabels(t *testing.T) { }, // verify that the default port is not removed (http). 
{ - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:80", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "http", MetricsPath: "/metrics", @@ -423,9 +424,9 @@ func TestPopulateLabels(t *testing.T) { }, // verify that the default port is not removed (https). { - in: labels.FromMap(map[string]string{ + in: model.LabelSet{ model.AddressLabel: "1.2.3.4:443", - }), + }, cfg: &config.ScrapeConfig{ Scheme: "https", MetricsPath: "/metrics", @@ -453,17 +454,18 @@ func TestPopulateLabels(t *testing.T) { }, } for _, c := range cases { - in := c.in.Copy() - - res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg) + in := maps.Clone(c.in) + lb := labels.NewBuilder(labels.EmptyLabels()) + res, err := PopulateLabels(lb, c.cfg, c.in, nil) if c.err != "" { require.EqualError(t, err, c.err) } else { require.NoError(t, err) + testutil.RequireEqual(t, c.res, res) + PopulateDiscoveredLabels(lb, c.cfg, c.in, nil) + testutil.RequireEqual(t, c.resOrig, lb.Labels()) } - require.Equal(t, c.in, in) - testutil.RequireEqual(t, c.res, res) - testutil.RequireEqual(t, c.resOrig, orig) + require.Equal(t, c.in, in) // Check this wasn't altered by PopulateLabels(). } } diff --git a/scrape/scrape.go b/scrape/scrape.go index 4803354cf6..2da07d719e 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -450,7 +450,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { switch { case nonEmpty: all = append(all, t) - case !t.discoveredLabels.IsEmpty(): + default: if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets { sp.droppedTargets = append(sp.droppedTargets, t) } @@ -553,9 +553,9 @@ func (sp *scrapePool) sync(targets []*Target) { if _, ok := uniqueLoops[hash]; !ok { uniqueLoops[hash] = nil } - // Need to keep the most updated labels information - // for displaying it in the Service Discovery web page. 
- sp.activeTargets[hash].SetDiscoveredLabels(t.DiscoveredLabels()) + // Need to keep the most updated ScrapeConfig for + // displaying labels in the Service Discovery web page. + sp.activeTargets[hash].SetScrapeConfig(sp.config, t.tLabels, t.tgLabels) } } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index bb62122bb9..f9164ea7ac 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -212,7 +212,8 @@ func TestDroppedTargetsList(t *testing.T) { sp.Sync(tgs) require.Len(t, sp.droppedTargets, expectedLength) require.Equal(t, expectedLength, sp.droppedTargetsCount) - require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String()) + lb := labels.NewBuilder(labels.EmptyLabels()) + require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels(lb).String()) // Check that count is still correct when we don't retain all dropped targets. sp.config.KeepDroppedTargets = 1 @@ -235,16 +236,19 @@ func TestDiscoveredLabelsUpdate(t *testing.T) { } sp.activeTargets = make(map[uint64]*Target) t1 := &Target{ - discoveredLabels: labels.FromStrings("label", "name"), + tLabels: model.LabelSet{"label": "name"}, + scrapeConfig: sp.config, } sp.activeTargets[t1.hash()] = t1 t2 := &Target{ - discoveredLabels: labels.FromStrings("labelNew", "nameNew"), + tLabels: model.LabelSet{"labelNew": "nameNew"}, + scrapeConfig: sp.config, } sp.sync([]*Target{t2}) - require.Equal(t, t2.DiscoveredLabels(), sp.activeTargets[t1.hash()].DiscoveredLabels()) + lb := labels.NewBuilder(labels.EmptyLabels()) + require.Equal(t, t2.DiscoveredLabels(lb), sp.activeTargets[t1.hash()].DiscoveredLabels(lb)) } type testLoop struct { @@ -309,7 +313,8 @@ func TestScrapePoolStop(t *testing.T) { for i := 0; i < numTargets; i++ { t := &Target{ - labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)), + labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)), + scrapeConfig: &config.ScrapeConfig{}, } l := 
&testLoop{} d := time.Duration((i+1)*20) * time.Millisecond @@ -394,8 +399,8 @@ func TestScrapePoolReload(t *testing.T) { for i := 0; i < numTargets; i++ { labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)) t := &Target{ - labels: labels, - discoveredLabels: labels, + labels: labels, + scrapeConfig: &config.ScrapeConfig{}, } l := &testLoop{} d := time.Duration((i+1)*20) * time.Millisecond @@ -2689,6 +2694,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { model.SchemeLabel, serverURL.Scheme, model.AddressLabel, serverURL.Host, ), + scrapeConfig: &config.ScrapeConfig{}, }, client: http.DefaultClient, timeout: configTimeout, @@ -2739,6 +2745,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { model.SchemeLabel, serverURL.Scheme, model.AddressLabel, serverURL.Host, ), + scrapeConfig: &config.ScrapeConfig{}, }, client: http.DefaultClient, acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), @@ -2794,6 +2801,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) { model.SchemeLabel, serverURL.Scheme, model.AddressLabel, serverURL.Host, ), + scrapeConfig: &config.ScrapeConfig{}, }, client: http.DefaultClient, acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), @@ -2837,6 +2845,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { model.SchemeLabel, serverURL.Scheme, model.AddressLabel, serverURL.Host, ), + scrapeConfig: &config.ScrapeConfig{}, }, client: http.DefaultClient, bodySizeLimit: bodySizeLimit, @@ -3107,7 +3116,8 @@ func TestReuseScrapeCache(t *testing.T) { } sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t)) t1 = &Target{ - discoveredLabels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"), + labels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"), + scrapeConfig: &config.ScrapeConfig{}, } proxyURL, _ = 
url.Parse("http://localhost:2128") ) @@ -3291,7 +3301,8 @@ func TestReuseCacheRace(t *testing.T) { buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t)) t1 = &Target{ - discoveredLabels: labels.FromStrings("labelNew", "nameNew"), + labels: labels.FromStrings("labelNew", "nameNew"), + scrapeConfig: &config.ScrapeConfig{}, } ) defer sp.stop() @@ -4475,7 +4486,9 @@ func BenchmarkTargetScraperGzip(b *testing.B) { model.SchemeLabel, serverURL.Scheme, model.AddressLabel, serverURL.Host, ), - params: url.Values{"count": []string{strconv.Itoa(scenario.metricsCount)}}, + scrapeConfig: &config.ScrapeConfig{ + Params: url.Values{"count": []string{strconv.Itoa(scenario.metricsCount)}}, + }, }, client: client, timeout: time.Second, diff --git a/scrape/target.go b/scrape/target.go index 06d4737ff9..d05866f863 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -45,12 +45,12 @@ const ( // Target refers to a singular HTTP or HTTPS endpoint. type Target struct { - // Labels before any processing. - discoveredLabels labels.Labels // Any labels that are added to this target and its metrics. labels labels.Labels - // Additional URL parameters that are part of the target URL. - params url.Values + // ScrapeConfig used to create this target. + scrapeConfig *config.ScrapeConfig + // Target and TargetGroup labels used to create this target. + tLabels, tgLabels model.LabelSet mtx sync.RWMutex lastError error @@ -61,12 +61,13 @@ type Target struct { } // NewTarget creates a reasonably configured target for querying. 
-func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target { +func NewTarget(labels labels.Labels, scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) *Target { return &Target{ - labels: labels, - discoveredLabels: discoveredLabels, - params: params, - health: HealthUnknown, + labels: labels, + tLabels: tLabels, + tgLabels: tgLabels, + scrapeConfig: scrapeConfig, + health: HealthUnknown, } } @@ -168,11 +169,11 @@ func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration } // Labels returns a copy of the set of all public labels of the target. -func (t *Target) Labels(b *labels.ScratchBuilder) labels.Labels { - b.Reset() +func (t *Target) Labels(b *labels.Builder) labels.Labels { + b.Reset(labels.EmptyLabels()) t.labels.Range(func(l labels.Label) { if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { - b.Add(l.Name, l.Value) + b.Set(l.Name, l.Value) } }) return b.Labels() @@ -188,24 +189,31 @@ func (t *Target) LabelsRange(f func(l labels.Label)) { } // DiscoveredLabels returns a copy of the target's labels before any processing. -func (t *Target) DiscoveredLabels() labels.Labels { +func (t *Target) DiscoveredLabels(lb *labels.Builder) labels.Labels { t.mtx.Lock() - defer t.mtx.Unlock() - return t.discoveredLabels.Copy() + cfg, tLabels, tgLabels := t.scrapeConfig, t.tLabels, t.tgLabels + t.mtx.Unlock() + PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels) + return lb.Labels() } -// SetDiscoveredLabels sets new DiscoveredLabels. -func (t *Target) SetDiscoveredLabels(l labels.Labels) { +// SetScrapeConfig sets new ScrapeConfig. +func (t *Target) SetScrapeConfig(scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) { t.mtx.Lock() defer t.mtx.Unlock() - t.discoveredLabels = l + t.scrapeConfig = scrapeConfig + t.tLabels = tLabels + t.tgLabels = tgLabels } // URL returns a copy of the target's URL. 
func (t *Target) URL() *url.URL { + t.mtx.Lock() + configParams := t.scrapeConfig.Params + t.mtx.Unlock() params := url.Values{} - for k, v := range t.params { + for k, v := range configParams { params[k] = make([]string, len(v)) copy(params[k], v) } @@ -420,10 +428,19 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels return ref, nil } -// PopulateLabels builds a label set from the given label set and scrape configuration. -// It returns a label set before relabeling was applied as the second return value. -// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { +// PopulateDiscoveredLabels sets base labels on lb from target and group labels and scrape configuration, before relabeling. +func PopulateDiscoveredLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) { + lb.Reset(labels.EmptyLabels()) + + for ln, lv := range tLabels { + lb.Set(string(ln), string(lv)) + } + for ln, lv := range tgLabels { + if _, ok := tLabels[ln]; !ok { + lb.Set(string(ln), string(lv)) + } + } + // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -444,44 +461,49 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab lb.Set(name, v[0]) } } +} - preRelabelLabels := lb.Labels() +// PopulateLabels builds labels from target and group labels and scrape configuration, +// performs defined relabeling, checks validity, and adds Prometheus standard labels such as 'instance'. +// A return of empty labels and nil error means the target was dropped by relabeling. 
+func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) (res labels.Labels, err error) { + PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels) keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) // Check if the target was dropped. if !keep { - return labels.EmptyLabels(), preRelabelLabels, nil + return labels.EmptyLabels(), nil } if v := lb.Get(model.AddressLabel); v == "" { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") + return labels.EmptyLabels(), errors.New("no address") } addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), err + return labels.EmptyLabels(), err } interval := lb.Get(model.ScrapeIntervalLabel) intervalDuration, err := model.ParseDuration(interval) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) + return labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) } if time.Duration(intervalDuration) == 0 { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0") + return labels.EmptyLabels(), errors.New("scrape interval cannot be 0") } timeout := lb.Get(model.ScrapeTimeoutLabel) timeoutDuration, err := model.ParseDuration(timeout) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) + return labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) } if time.Duration(timeoutDuration) == 0 { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") + return labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") } if timeoutDuration > intervalDuration { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + 
return labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) } // Meta labels are deleted after relabelling. Other internal labels propagate to @@ -506,9 +528,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab return nil }) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), err + return labels.EmptyLabels(), err } - return res, preRelabelLabels, nil + return res, nil } // TargetsFromGroup builds targets based on the given TargetGroup and config. @@ -516,24 +538,12 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets [ targets = targets[:0] failures := []error{} - for i, tlset := range tg.Targets { - lb.Reset(labels.EmptyLabels()) - - for ln, lv := range tlset { - lb.Set(string(ln), string(lv)) - } - for ln, lv := range tg.Labels { - if _, ok := tlset[ln]; !ok { - lb.Set(string(ln), string(lv)) - } - } - - lset, origLabels, err := PopulateLabels(lb, cfg) + for i, tLabels := range tg.Targets { + lset, err := PopulateLabels(lb, cfg, tLabels, tg.Labels) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) - } - if !lset.IsEmpty() || !origLabels.IsEmpty() { - targets = append(targets, NewTarget(lset, origLabels, cfg.Params)) + } else { + targets = append(targets, NewTarget(lset, cfg, tLabels, tg.Labels)) } } return targets, failures diff --git a/scrape/target_test.go b/scrape/target_test.go index 0d763d7389..9dad18f01c 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -43,8 +43,8 @@ const ( func TestTargetLabels(t *testing.T) { target := newTestTarget("example.com:80", 0, labels.FromStrings("job", "some_job", "foo", "bar")) want := labels.FromStrings(model.JobLabel, "some_job", "foo", "bar") - b := labels.NewScratchBuilder(0) - got := target.Labels(&b) + b := labels.NewBuilder(labels.EmptyLabels()) + got := target.Labels(b) require.Equal(t, want, got) i := 0 
target.LabelsRange(func(l labels.Label) { @@ -103,9 +103,11 @@ func TestTargetOffset(t *testing.T) { } func TestTargetURL(t *testing.T) { - params := url.Values{ - "abc": []string{"foo", "bar", "baz"}, - "xyz": []string{"hoo"}, + scrapeConfig := &config.ScrapeConfig{ + Params: url.Values{ + "abc": []string{"foo", "bar", "baz"}, + "xyz": []string{"hoo"}, + }, } labels := labels.FromMap(map[string]string{ model.AddressLabel: "example.com:1234", @@ -114,7 +116,7 @@ func TestTargetURL(t *testing.T) { "__param_abc": "overwrite", "__param_cde": "huu", }) - target := NewTarget(labels, labels, params) + target := NewTarget(labels, scrapeConfig, nil, nil) // The reserved labels are concatenated into a full URL. The first value for each // URL query parameter can be set/modified via labels as well. @@ -139,7 +141,7 @@ func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Targe lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://")) lb.Set(model.MetricsPathLabel, "/metrics") - return &Target{labels: lb.Labels()} + return &Target{labels: lb.Labels(), scrapeConfig: &config.ScrapeConfig{}} } func TestNewHTTPBearerToken(t *testing.T) { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 392dfc6aab..3f26cbd484 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1083,12 +1083,12 @@ func (api *API) targets(r *http.Request) apiFuncResult { showActive := state == "" || state == "any" || state == "active" showDropped := state == "" || state == "any" || state == "dropped" res := &TargetDiscovery{} + builder := labels.NewBuilder(labels.EmptyLabels()) if showActive { targetsActive := api.targetRetriever(r.Context()).TargetsActive() activeKeys, numTargets := sortKeys(targetsActive) res.ActiveTargets = make([]*Target, 0, numTargets) - builder := labels.NewScratchBuilder(0) for _, key := range activeKeys { if scrapePool != "" && key != scrapePool { @@ -1104,8 +1104,8 @@ func (api *API) targets(r *http.Request) apiFuncResult { globalURL, err := 
getGlobalURL(target.URL(), api.globalURLOptions) res.ActiveTargets = append(res.ActiveTargets, &Target{ - DiscoveredLabels: target.DiscoveredLabels(), - Labels: target.Labels(&builder), + DiscoveredLabels: target.DiscoveredLabels(builder), + Labels: target.Labels(builder), ScrapePool: key, ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), @@ -1143,7 +1143,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { } for _, target := range targetsDropped[key] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ - DiscoveredLabels: target.DiscoveredLabels(), + DiscoveredLabels: target.DiscoveredLabels(builder), }) } } @@ -1181,7 +1181,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } } - builder := labels.NewScratchBuilder(0) + builder := labels.NewBuilder(labels.EmptyLabels()) metric := r.FormValue("metric") res := []metricMetadata{} for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { @@ -1189,7 +1189,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { if limit >= 0 && len(res) >= limit { break } - targetLabels := t.Labels(&builder) + targetLabels := t.Labels(builder) // Filter targets that don't satisfy the label matchers. 
if matchTarget != "" && !matchLabels(targetLabels, matchers) { continue diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 0168bc57e1..175ed2e0f0 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -103,12 +103,12 @@ type testTargetRetriever struct { } type testTargetParams struct { - Identifier string - Labels labels.Labels - DiscoveredLabels labels.Labels - Params url.Values - Reports []*testReport - Active bool + Identifier string + Labels labels.Labels + targetLabels model.LabelSet + Params url.Values + Reports []*testReport + Active bool } type testReport struct { @@ -124,7 +124,7 @@ func newTestTargetRetriever(targetsInfo []*testTargetParams) *testTargetRetrieve droppedTargets = make(map[string][]*scrape.Target) for _, t := range targetsInfo { - nt := scrape.NewTarget(t.Labels, t.DiscoveredLabels, t.Params) + nt := scrape.NewTarget(t.Labels, &config.ScrapeConfig{Params: t.Params}, t.targetLabels, nil) for _, r := range t.Reports { nt.Report(r.Start, r.Duration, r.Error) @@ -1004,10 +1004,9 @@ func setupTestTargetRetriever(t *testing.T) *testTargetRetriever { model.ScrapeIntervalLabel: "15s", model.ScrapeTimeoutLabel: "5s", }), - DiscoveredLabels: labels.EmptyLabels(), - Params: url.Values{}, - Reports: []*testReport{{scrapeStart, 70 * time.Millisecond, nil}}, - Active: true, + Params: url.Values{}, + Reports: []*testReport{{scrapeStart, 70 * time.Millisecond, nil}}, + Active: true, }, { Identifier: "blackbox", @@ -1019,22 +1018,21 @@ func setupTestTargetRetriever(t *testing.T) *testTargetRetriever { model.ScrapeIntervalLabel: "20s", model.ScrapeTimeoutLabel: "10s", }), - DiscoveredLabels: labels.EmptyLabels(), - Params: url.Values{"target": []string{"example.com"}}, - Reports: []*testReport{{scrapeStart, 100 * time.Millisecond, errors.New("failed")}}, - Active: true, + Params: url.Values{"target": []string{"example.com"}}, + Reports: []*testReport{{scrapeStart, 100 * time.Millisecond, errors.New("failed")}}, + Active: true, }, 
{ Identifier: "blackbox", Labels: labels.EmptyLabels(), - DiscoveredLabels: labels.FromMap(map[string]string{ + targetLabels: model.LabelSet{ model.SchemeLabel: "http", model.AddressLabel: "http://dropped.example.com:9115", model.MetricsPathLabel: "/probe", model.JobLabel: "blackbox", model.ScrapeIntervalLabel: "30s", model.ScrapeTimeoutLabel: "15s", - }), + }, Params: url.Values{}, Active: false, }, @@ -1507,7 +1505,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E response: &TargetDiscovery{ ActiveTargets: []*Target{ { - DiscoveredLabels: labels.FromStrings(), + DiscoveredLabels: labels.FromStrings("__param_target", "example.com", "__scrape_interval__", "0s", "__scrape_timeout__", "0s"), Labels: labels.FromStrings("job", "blackbox"), ScrapePool: "blackbox", ScrapeURL: "http://localhost:9115/probe?target=example.com", @@ -1520,7 +1518,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E ScrapeTimeout: "10s", }, { - DiscoveredLabels: labels.FromStrings(), + DiscoveredLabels: labels.FromStrings("__scrape_interval__", "0s", "__scrape_timeout__", "0s"), Labels: labels.FromStrings("job", "test"), ScrapePool: "test", ScrapeURL: "http://example.com:8080/metrics", @@ -1556,7 +1554,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E response: &TargetDiscovery{ ActiveTargets: []*Target{ { - DiscoveredLabels: labels.FromStrings(), + DiscoveredLabels: labels.FromStrings("__param_target", "example.com", "__scrape_interval__", "0s", "__scrape_timeout__", "0s"), Labels: labels.FromStrings("job", "blackbox"), ScrapePool: "blackbox", ScrapeURL: "http://localhost:9115/probe?target=example.com", @@ -1569,7 +1567,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E ScrapeTimeout: "10s", }, { - DiscoveredLabels: labels.FromStrings(), + DiscoveredLabels: labels.FromStrings("__scrape_interval__", "0s", "__scrape_timeout__", "0s"), Labels: 
labels.FromStrings("job", "test"), ScrapePool: "test", ScrapeURL: "http://example.com:8080/metrics", @@ -1605,7 +1603,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E response: &TargetDiscovery{ ActiveTargets: []*Target{ { - DiscoveredLabels: labels.FromStrings(), + DiscoveredLabels: labels.FromStrings("__param_target", "example.com", "__scrape_interval__", "0s", "__scrape_timeout__", "0s"), Labels: labels.FromStrings("job", "blackbox"), ScrapePool: "blackbox", ScrapeURL: "http://localhost:9115/probe?target=example.com", @@ -1618,7 +1616,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E ScrapeTimeout: "10s", }, { - DiscoveredLabels: labels.FromStrings(), + DiscoveredLabels: labels.FromStrings("__scrape_interval__", "0s", "__scrape_timeout__", "0s"), Labels: labels.FromStrings("job", "test"), ScrapePool: "test", ScrapeURL: "http://example.com:8080/metrics", From 5e7f804eeb76bbba3819460f6f9a9d32f5f637de Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sat, 21 Dec 2024 18:51:40 +0100 Subject: [PATCH 1173/1397] otlptranslator: Remove unused function TrimPromSuffixes (#15709) Signed-off-by: Arve Knudsen --- .../prometheus/normalize_name.go | 60 ------------------- .../prometheus/normalize_name_test.go | 32 ---------- 2 files changed, 92 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 6967ca013c..d853cb75a9 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -223,66 +223,6 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri return normalizedName } -// TrimPromSuffixes trims type and unit prometheus suffixes from a metric name. -// Following the [OpenTelemetry specs] for converting Prometheus Metric points to OTLP. 
-// -// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata -func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string { - nameTokens := strings.Split(promName, "_") - if len(nameTokens) == 1 { - return promName - } - - nameTokens = removeTypeSuffixes(nameTokens, metricType) - nameTokens = removeUnitSuffixes(nameTokens, unit) - - return strings.Join(nameTokens, "_") -} - -func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string { - switch metricType { - case pmetric.MetricTypeSum: - // Only counters are expected to have a type suffix at this point. - // for other types, suffixes are removed during scrape. - return removeSuffix(tokens, "total") - default: - return tokens - } -} - -func removeUnitSuffixes(nameTokens []string, unit string) []string { - l := len(nameTokens) - unitTokens := strings.Split(unit, "_") - lu := len(unitTokens) - - if lu == 0 || l <= lu { - return nameTokens - } - - suffixed := true - for i := range unitTokens { - if nameTokens[l-i-1] != unitTokens[lu-i-1] { - suffixed = false - break - } - } - - if suffixed { - return nameTokens[:l-lu] - } - - return nameTokens -} - -func removeSuffix(tokens []string, suffix string) []string { - l := len(tokens) - if tokens[l-1] == suffix { - return tokens[:l-1] - } - - return tokens -} - // cleanUpUnit cleans up unit so it matches model.LabelNameRE. func cleanUpUnit(unit string) string { // Multiple consecutive underscores are replaced with a single underscore. 
diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index d97e7a560a..17b755e042 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -19,9 +19,7 @@ package prometheus import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pmetric" ) func TestByte(t *testing.T) { @@ -140,36 +138,6 @@ func TestOTelReceivers(t *testing.T) { require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false)) } -func TestTrimPromSuffixes(t *testing.T) { - assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes")) - assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent")) - assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds")) - assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1")) - assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio")) - assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes")) - assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second")) - assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", 
pmetric.MetricTypeGauge, "gibibytes_per_hour")) - assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes")) - assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds")) - assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, "")) - - // These are not necessarily valid OM units, only tested for the sake of completeness. - assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}")) - assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections")) - assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}")) - assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}")) - assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections")) - assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections")) - assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests")) - - // Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s" - assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1")) - assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s")) - 
assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%")) - assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s")) - assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s")) -} - func TestNamespace(t *testing.T) { require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false)) require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false)) From 475b7ff256468dbdfed3ca97074039d083f216fb Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sat, 21 Dec 2024 18:18:31 +0100 Subject: [PATCH 1174/1397] OTLP receiver: Allow colons in non-standard units Signed-off-by: Arve Knudsen --- CHANGELOG.md | 1 + .../remote/otlptranslator/prometheus/normalize_name.go | 8 ++++---- .../otlptranslator/prometheus/normalize_name_test.go | 10 +++++++++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 797bf8de08..a75e163ec0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428 * [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416 +* [BUGFIX] OTLP receiver: Allow colons in non-standard units. 
#15710 ## 3.0.1 / 2024-11-28 diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index d853cb75a9..0a48e28219 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -22,7 +22,6 @@ import ( "strings" "unicode" - "github.com/prometheus/prometheus/util/strutil" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -120,18 +119,19 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix return metricName } +var nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) + // Build a normalized name for the specified metric. func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { var nameTokens []string var separators []string if !allowUTF8 { - nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) // Split metric name into "tokens" (of supported metric name runes). // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. nameTokens = strings.FieldsFunc( metric.Name(), - func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }, + func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, ) } else { translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } @@ -229,7 +229,7 @@ func cleanUpUnit(unit string) string { // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
multipleUnderscoresRE := regexp.MustCompile(`__+`) return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( - strutil.SanitizeLabelName(unit), + nonMetricNameCharRE.ReplaceAllString(unit, "_"), "_", ), "_") } diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go index 17b755e042..0473f6cbe1 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -37,6 +37,8 @@ func TestWhiteSpaces(t *testing.T) { func TestNonStandardUnit(t *testing.T) { require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false)) + // The normal metric name character set is allowed in non-standard units. + require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), "", false)) } func TestNonStandardUnitCounter(t *testing.T) { @@ -68,6 +70,12 @@ func TestHertz(t *testing.T) { func TestPer(t *testing.T) { require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false)) require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false)) + // The normal metric name character set is allowed in non-standard units. 
+ require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), "", false)) + + t.Run("invalid per unit", func(t *testing.T) { + require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), "", false)) + }) } func TestPercent(t *testing.T) { @@ -89,7 +97,7 @@ func TestAllowUTF8(t *testing.T) { }) t.Run("disallow UTF8", func(t *testing.T) { require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false)) - require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", false)) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.,!* & #"), "", false)) require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false)) require.Equal(t, "metric_with_foreign_characters", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false)) }) From 1508149184faaa46acd7ac22aa3116f8f8ccfdd3 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Fri, 27 Dec 2024 09:09:13 -0800 Subject: [PATCH 1175/1397] Update benchmark test and comment --- tsdb/record/record.go | 2 +- tsdb/record/record_test.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tsdb/record/record.go b/tsdb/record/record.go index ccfbbfcef9..4d2a52b9af 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -770,7 +770,7 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([ EncodeHistogram(&buf, h.H) } - // Reset buffer if only custom bucket histograms existed in list of histogram samples + // Reset buffer if only custom bucket histograms existed in list of histogram samples. 
if len(histograms) == len(customBucketHistograms) { buf.Reset() } diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 6e9c6e483c..f615a334ea 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -585,7 +585,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { for _, labelCount := range []int{0, 10, 50} { for _, histograms := range []int{10, 100, 1000} { for _, buckets := range []int{0, 1, 10, 100} { - b.Run(fmt.Sprintf("%s labels=%d histograms=%d buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) { + b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) { resetCache() maker.init(labelCount, histograms, buckets) enc := Encoder{} @@ -598,6 +598,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { if len(leftOver) > 0 { enc.CustomBucketsHistogramSamples(leftOver, buf) } + b.ReportMetric(float64(len(buf)), "recordBytes/ops") } }) } From 8f4557b0b14382674d706cc38b673be9e6f1bbd8 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 20 Aug 2024 20:11:21 +0100 Subject: [PATCH 1176/1397] Scraping benchmark: more realistic test Don't repeat type and help text. 
Signed-off-by: Bryan Boreham --- scrape/scrape_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index f9164ea7ac..1f0291ae00 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1256,9 +1256,9 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { func makeTestMetrics(n int) []byte { // Construct a metrics string to parse sb := bytes.Buffer{} + fmt.Fprintf(&sb, "# TYPE metric_a gauge\n") + fmt.Fprintf(&sb, "# HELP metric_a help text\n") for i := 0; i < n; i++ { - fmt.Fprintf(&sb, "# TYPE metric_a gauge\n") - fmt.Fprintf(&sb, "# HELP metric_a help text\n") fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100) } fmt.Fprintf(&sb, "# EOF\n") From b4ef38cfc8b4ff100783fcc26e909c77993c26f7 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 20 Aug 2024 20:12:02 +0100 Subject: [PATCH 1177/1397] Scraping: Add benchmark for protobuf format Extract helper function textToProto(). Signed-off-by: Bryan Boreham --- scrape/scrape_test.go | 72 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 60 insertions(+), 12 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 1f0291ae00..fa11ad6dcf 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1295,6 +1295,45 @@ func BenchmarkScrapeLoopAppendOM(b *testing.B) { } } +func BenchmarkScrapeLoopAppendProto(b *testing.B) { + ctx, sl := simpleTestScrapeLoop(b) + + slApp := sl.appender(ctx) + + // Construct a metrics string to parse + sb := bytes.Buffer{} + fmt.Fprintf(&sb, "type: GAUGE\n") + fmt.Fprintf(&sb, "help: \"metric_a help text\"\n") + fmt.Fprintf(&sb, "name: \"metric_a\"\n") + for i := 0; i < 100; i++ { + fmt.Fprintf(&sb, `metric: < + label: < + name: "foo" + value: "%d" + > + label: < + name: "bar" + value: "%d" + > + gauge: < + value: 1 + > +> +`, i, i*100) + } + // From text to proto message. 
+ pb := bytes.Buffer{} + require.NoError(b, textToProto(sb.String(), &pb)) + ts := time.Time{} + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + ts = ts.Add(time.Second) + _, _, _, _ = sl.append(slApp, pb.Bytes(), "application/vnd.google.protobuf", ts) + } +} + func TestSetOptionsHandlingStaleness(t *testing.T) { s := teststorage.New(t, 600000) defer s.Close() @@ -2454,18 +2493,7 @@ metric: < buf := &bytes.Buffer{} if test.contentType == "application/vnd.google.protobuf" { - // In case of protobuf, we have to create the binary representation. - pb := &dto.MetricFamily{} - // From text to proto message. - require.NoError(t, proto.UnmarshalText(test.scrapeText, pb)) - // From proto message to binary protobuf. - protoBuf, err := proto.Marshal(pb) - require.NoError(t, err) - - // Write first length, then binary protobuf. - varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf))) - buf.Write(varintBuf) - buf.Write(protoBuf) + require.NoError(t, textToProto(test.scrapeText, buf)) } else { buf.WriteString(test.scrapeText) } @@ -2480,6 +2508,26 @@ metric: < } } +func textToProto(text string, buf *bytes.Buffer) error { + // In case of protobuf, we have to create the binary representation. + pb := &dto.MetricFamily{} + // From text to proto message. + err := proto.UnmarshalText(text, pb) + if err != nil { + return err + } + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + if err != nil { + return err + } + // Write first length, then binary protobuf. 
+ varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf))) + buf.Write(varintBuf) + buf.Write(protoBuf) + return nil +} + func TestScrapeLoopAppendExemplarSeries(t *testing.T) { scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000 # EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000 From bc9210e393d520d460dd197bf640578f2daca90a Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 23 Aug 2024 09:12:02 +0100 Subject: [PATCH 1178/1397] [TESTS] Scrape: make caching work in benchmark Returning 0 from Append means 'unknown', so the series is never cached. Return arbitrary numbers instead. Signed-off-by: Bryan Boreham --- scrape/helpers_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 12a56d7071..7bc9e3f7d4 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -46,15 +46,15 @@ type nopAppender struct{} func (a nopAppender) SetOptions(opts *storage.AppendOptions) {} func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { - return 0, nil + return 1, nil } func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) { - return 0, nil + return 2, nil } func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 0, nil + return 3, nil } func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { @@ -62,11 +62,11 @@ func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels } func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { - return 0, nil + return 4, nil } func (a nopAppender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, 
int64) (storage.SeriesRef, error) { - return 0, nil + return 5, nil } func (a nopAppender) Commit() error { return nil } From 281306765e3ddd2b82c0849a9fe443ff2cda88a2 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Sun, 29 Dec 2024 15:10:39 +0000 Subject: [PATCH 1179/1397] scrape: Unified scrape loop benchmark. Signed-off-by: bwplotka --- scrape/scrape_test.go | 115 ++++++++++++++++++++---------------------- 1 file changed, 54 insertions(+), 61 deletions(-) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index fa11ad6dcf..a67d52e5cc 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -39,6 +39,7 @@ import ( prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" @@ -1265,72 +1266,64 @@ func makeTestMetrics(n int) []byte { return sb.Bytes() } +func promTextToProto(tb testing.TB, text []byte) []byte { + tb.Helper() + + d := expfmt.NewDecoder(bytes.NewReader(text), expfmt.TextVersion) + + pb := &dto.MetricFamily{} + if err := d.Decode(pb); err != nil { + tb.Fatal(err) + } + o, err := proto.Marshal(pb) + if err != nil { + tb.Fatal(err) + } + buf := bytes.Buffer{} + // Write first length, then binary protobuf. + varintBuf := binary.AppendUvarint(nil, uint64(len(o))) + buf.Write(varintBuf) + buf.Write(o) + return buf.Bytes() +} + +/* + export bench=scrape-loop-v1 && go test \ + -run '^$' -bench '^BenchmarkScrapeLoopAppend' \ + -benchtime 5s -count 6 -cpu 2 -timeout 999m \ + | tee ${bench}.txt +*/ func BenchmarkScrapeLoopAppend(b *testing.B) { - ctx, sl := simpleTestScrapeLoop(b) + metricsText := makeTestMetrics(100) - slApp := sl.appender(ctx) - metrics := makeTestMetrics(100) - ts := time.Time{} + // Create proto representation. 
+ metricsProto := promTextToProto(b, metricsText) - b.ResetTimer() + for _, bcase := range []struct { + name string + contentType string + parsable []byte + }{ + {name: "PromText", contentType: "text/plain", parsable: metricsText}, + {name: "OMText", contentType: "application/openmetrics-text", parsable: metricsText}, + {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto}, + } { + b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) { + ctx, sl := simpleTestScrapeLoop(b) - for i := 0; i < b.N; i++ { - ts = ts.Add(time.Second) - _, _, _, _ = sl.append(slApp, metrics, "text/plain", ts) - } -} + slApp := sl.appender(ctx) + ts := time.Time{} -func BenchmarkScrapeLoopAppendOM(b *testing.B) { - ctx, sl := simpleTestScrapeLoop(b) - - slApp := sl.appender(ctx) - metrics := makeTestMetrics(100) - ts := time.Time{} - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - ts = ts.Add(time.Second) - _, _, _, _ = sl.append(slApp, metrics, "application/openmetrics-text", ts) - } -} - -func BenchmarkScrapeLoopAppendProto(b *testing.B) { - ctx, sl := simpleTestScrapeLoop(b) - - slApp := sl.appender(ctx) - - // Construct a metrics string to parse - sb := bytes.Buffer{} - fmt.Fprintf(&sb, "type: GAUGE\n") - fmt.Fprintf(&sb, "help: \"metric_a help text\"\n") - fmt.Fprintf(&sb, "name: \"metric_a\"\n") - for i := 0; i < 100; i++ { - fmt.Fprintf(&sb, `metric: < - label: < - name: "foo" - value: "%d" - > - label: < - name: "bar" - value: "%d" - > - gauge: < - value: 1 - > -> -`, i, i*100) - } - // From text to proto message. 
- pb := bytes.Buffer{} - require.NoError(b, textToProto(sb.String(), &pb)) - ts := time.Time{} - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - ts = ts.Add(time.Second) - _, _, _, _ = sl.append(slApp, pb.Bytes(), "application/vnd.google.protobuf", ts) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts = ts.Add(time.Second) + _, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts) + if err != nil { + b.Fatal(err) + } + } + }) } } From 43fd40cae0e24dfb691204ae4f810428f99164bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 29 Dec 2024 17:49:51 +0100 Subject: [PATCH 1180/1397] chore(deps): bump github/codeql-action from 3.27.5 to 3.27.7 (#15582) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.5 to 3.27.7. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f09c1c0a94de965c15400f5634aa42fac8fb8f88...babb554ede22fd5605947329c4d04d8e7a0b8155) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c30d907326..56839da9cb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 + uses: github/codeql-action/init@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 + uses: github/codeql-action/autobuild@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 + uses: github/codeql-action/analyze@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f637e921e5..edb347abea 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # tag=v3.27.5 + uses: github/codeql-action/upload-sarif@babb554ede22fd5605947329c4d04d8e7a0b8155 # tag=v3.27.7 with: sarif_file: results.sarif From 4d2c1c1d066de0aa738b6ee07e8bce1529fa5eae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 29 Dec 2024 17:50:09 +0100 Subject: [PATCH 1181/1397] chore(deps): bump actions/cache from 4.1.2 to 4.2.0 (#15583) Bumps [actions/cache](https://github.com/actions/cache) from 4.1.2 to 4.2.0. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/6849a6489940f00c2f30c0fb92c6274307ccb58a...1bd1e32a3bdc45362d1e726936510720a7c30a57) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0d4302072..65173c1072 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -247,7 +247,7 @@ jobs: with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" - - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} From 2c5502c1144697c693911e56694b172f70d819fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 29 Dec 2024 17:50:30 +0100 Subject: [PATCH 1182/1397] chore(deps): bump actions/setup-go from 5.1.0 to 5.2.0 in /scripts (#15580) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.1.0 to 5.2.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed...3041bf56c941b39c61721a86cd11f3bb1338122a) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index e645ba30ae..01b943b9b5 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies From 061400e31b5d2246372717d5b559464194900257 Mon Sep 17 00:00:00 2001 From: johncming Date: Mon, 30 Dec 2024 00:54:45 +0800 Subject: [PATCH 1183/1397] tsdb: export CheckpointPrefix constant (#15636) Exported the CheckpointPrefix constant to be used in other packages. Updated references to the constant in db.go and checkpoint.go files. This change improves code readability and maintainability. 
Signed-off-by: johncming Co-authored-by: johncming --- tsdb/db.go | 2 +- tsdb/wlog/checkpoint.go | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index 2cfa4f3638..2d35e3fb00 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -2320,7 +2320,7 @@ func isTmpDir(fi fs.DirEntry) bool { fn := fi.Name() ext := filepath.Ext(fn) if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy { - if strings.HasPrefix(fn, "checkpoint.") { + if strings.HasPrefix(fn, wlog.CheckpointPrefix) { return true } if strings.HasPrefix(fn, chunkSnapshotPrefix) { diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 58e11c770e..dd62a79e2a 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -81,7 +81,8 @@ func DeleteCheckpoints(dir string, maxIndex int) error { return errs.Err() } -const checkpointPrefix = "checkpoint." +// CheckpointPrefix is the prefix used for checkpoint files. +const CheckpointPrefix = "checkpoint." // Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL. // It includes the most recent checkpoint if it exists. 
@@ -363,7 +364,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } func checkpointDir(dir string, i int) string { - return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i)) + return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i)) } type checkpointRef struct { @@ -379,13 +380,13 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { for i := 0; i < len(files); i++ { fi := files[i] - if !strings.HasPrefix(fi.Name(), checkpointPrefix) { + if !strings.HasPrefix(fi.Name(), CheckpointPrefix) { continue } if !fi.IsDir() { return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name()) } - idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) + idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):]) if err != nil { continue } From 6a61efcfc3bf4c0469959ed8d2c879059449266c Mon Sep 17 00:00:00 2001 From: pinglanlu Date: Mon, 30 Dec 2024 01:03:06 +0800 Subject: [PATCH 1184/1397] discovery: use a more direct and less error-prone return value (#15347) Signed-off-by: pinglanlu --- discovery/uyuni/uyuni.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index 1bd0cd2d49..a37083575c 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -205,9 +205,6 @@ func getEndpointInfoForSystems( err := rpcclient.Call( "system.monitoring.listEndpoints", []interface{}{token, systemIDs}, &endpointInfos) - if err != nil { - return nil, err - } return endpointInfos, err } From 08c81b721a0e50c30c5661650a458cab1b924f81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 29 Dec 2024 17:08:18 +0000 Subject: [PATCH 1185/1397] chore(deps): bump actions/setup-go from 5.1.0 to 5.2.0 (#15581) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.1.0 to 5.2.0. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed...3041bf56c941b39c61721a86cd11f3bb1338122a) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 65173c1072..e96f77c114 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -80,7 +80,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: 1.23.x - run: | @@ -171,7 +171,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: cache: false go-version: 1.23.x @@ -184,7 +184,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies From 9823a93c4238b3b549321ece051bbba6d3922741 Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 30 Dec 2024 05:14:44 +0100 Subject: [PATCH 1186/1397] fix(main.go): avoid closing the query engine until it is guaranteed to no longer be in use. 
partially reverts https://github.com/prometheus/prometheus/pull/14064 fixes https://github.com/prometheus/prometheus/issues/15232 supersedes https://github.com/prometheus/prometheus/pull/15533 reusing Engine.Close() outside of tests will require more consideration. Signed-off-by: machine424 --- cmd/prometheus/main.go | 9 --------- promql/engine.go | 2 ++ 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d2a69634aa..06f46f8d72 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -989,18 +989,12 @@ func main() { listeners, err := webHandler.Listeners() if err != nil { logger.Error("Unable to start web listener", "err", err) - if err := queryEngine.Close(); err != nil { - logger.Warn("Closing query engine failed", "err", err) - } os.Exit(1) } err = toolkit_web.Validate(*webConfig) if err != nil { logger.Error("Unable to validate web configuration file", "err", err) - if err := queryEngine.Close(); err != nil { - logger.Warn("Closing query engine failed", "err", err) - } os.Exit(1) } @@ -1022,9 +1016,6 @@ func main() { case <-cancel: reloadReady.Close() } - if err := queryEngine.Close(); err != nil { - logger.Warn("Closing query engine failed", "err", err) - } return nil }, func(err error) { diff --git a/promql/engine.go b/promql/engine.go index a23e899d04..bbd8410268 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -436,6 +436,8 @@ func NewEngine(opts EngineOpts) *Engine { } // Close closes ng. 
+// Callers must ensure the engine is really no longer in use before calling this to avoid +// issues failures like in https://github.com/prometheus/prometheus/issues/15232 func (ng *Engine) Close() error { if ng == nil { return nil From 4cbd9ffb91ce9c64db4b29591475a811b0c7c95e Mon Sep 17 00:00:00 2001 From: TJ Hoplock <33664289+tjhop@users.noreply.github.com> Date: Mon, 30 Dec 2024 03:46:17 -0500 Subject: [PATCH 1187/1397] docs: update required go version in readme to 1.22 (#15447) It was bumped during 3.0 with the adoption of log/slog and other dep updates. ``` ~/go/src/github.com/prometheus/prometheus (main [ ]) -> grep '^go' go.mod go 1.22.0 ``` Signed-off-by: TJ Hoplock --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 63e5b13ba1..658cee4640 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ Prometheus will now be reachable at . To build Prometheus from source code, You need: -* Go [version 1.17 or greater](https://golang.org/doc/install). +* Go [version 1.22 or greater](https://golang.org/doc/install). * NodeJS [version 16 or greater](https://nodejs.org/). * npm [version 7 or greater](https://www.npmjs.com/). From b2fa1c952450d4158adc71d64e9dc6defd134371 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 30 Dec 2024 17:42:52 +0000 Subject: [PATCH 1188/1397] TSDB benchmarks: Commit periodically to speed up init When creating dummy data for benchmarks, call `Commit()` periodically to avoid growing the appender to enormous size. 
Signed-off-by: Bryan Boreham --- tsdb/querier_bench_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 8b8f092a81..038052e340 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -55,6 +55,8 @@ func BenchmarkQuerier(b *testing.B) { addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "1_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "bar")) addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", "2_"+strconv.Itoa(n)+postingsBenchSuffix, "j", "foo")) } + require.NoError(b, app.Commit()) + app = h.Appender(context.Background()) } require.NoError(b, app.Commit()) @@ -270,6 +272,10 @@ func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(ap app := h.Appender(context.Background()) for i := 0; i < numSeries; i++ { addSeries(app, i) + if i%1000 == 999 { // Commit every so often, so the appender doesn't get too big. + require.NoError(b, app.Commit()) + app = h.Appender(context.Background()) + } } require.NoError(b, app.Commit()) return h, db From f37b5adfef76f884be9c79d9e121f0e828eebf9b Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 31 Dec 2024 11:12:21 +0100 Subject: [PATCH 1189/1397] OTLP receiver: Optimize by initializing regexps at program start (#15733) Signed-off-by: Arve Knudsen --- .../otlptranslator/prometheus/normalize_name.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 0a48e28219..580c72b548 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -95,9 +95,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix var metricName string if !allowUTF8 { - // Regexp for metric name characters that should be replaced with _. 
- invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) - // Simple case (no full normalization, no units, etc.). metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { return invalidMetricCharRE.MatchString(string(r)) @@ -119,7 +116,12 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix return metricName } -var nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) +var ( + nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`) + multipleUnderscoresRE = regexp.MustCompile(`__+`) +) // Build a normalized name for the specified metric. func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { @@ -227,7 +229,6 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri func cleanUpUnit(unit string) string { // Multiple consecutive underscores are replaced with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
- multipleUnderscoresRE := regexp.MustCompile(`__+`) return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( nonMetricNameCharRE.ReplaceAllString(unit, "_"), "_", From a7ccc8e091fd5a91df345ecba3cef940ef47f0ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 2 Jan 2025 12:45:20 +0100 Subject: [PATCH 1190/1397] record_test.go: avoid captures, simply return test refs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- tsdb/record/record_test.go | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index f615a334ea..dc625f0830 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -469,26 +469,17 @@ func TestRecord_MetadataDecodeUnknownExtraFields(t *testing.T) { require.Equal(t, expectedMetadata, decMetadata) } +type refsCreateFn func(labelCount, histograms, buckets int) ([]RefSeries, []RefSample, []RefHistogramSample) + type recordsMaker struct { name string - init func(int, int, int) + make refsCreateFn } // BenchmarkWAL_HistogramLog measures efficiency of encoding classic // histograms and native historgrams with custom buckets (NHCB). func BenchmarkWAL_HistogramEncoding(b *testing.B) { - // Cache for the refs. 
- var series []RefSeries - var samples []RefSample - var nhcbs []RefHistogramSample - - resetCache := func() { - series = nil - samples = nil - nhcbs = nil - } - - initClassicRefs := func(labelCount, histograms, buckets int) { + initClassicRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) { ref := chunks.HeadSeriesRef(0) lbls := map[string]string{} for i := range labelCount { @@ -500,7 +491,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { Ref: ref, Labels: labels.FromMap(lbls), }) - samples = append(samples, RefSample{ + floatSamples = append(floatSamples, RefSample{ Ref: ref, T: 100, V: float64(i), @@ -512,7 +503,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { Ref: ref, Labels: labels.FromMap(lbls), }) - samples = append(samples, RefSample{ + floatSamples = append(floatSamples, RefSample{ Ref: ref, T: 100, V: float64(i), @@ -529,7 +520,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { Ref: ref, Labels: labels.FromMap(lbls), }) - samples = append(samples, RefSample{ + floatSamples = append(floatSamples, RefSample{ Ref: ref, T: 100, V: float64(i + j), @@ -538,9 +529,10 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { } delete(lbls, model.BucketLabel) } + return } - initNHCBRefs := func(labelCount, histograms, buckets int) { + initNHCBRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) { ref := chunks.HeadSeriesRef(0) lbls := map[string]string{} for i := range labelCount { @@ -563,31 +555,31 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) { for j := range buckets { h.PositiveBuckets[j] = int64(i + j) } - nhcbs = append(nhcbs, RefHistogramSample{ + histSamples = append(histSamples, RefHistogramSample{ Ref: ref, T: 100, H: h, }) ref++ } + return } for _, maker := range []recordsMaker{ { name: "classic", - init: initClassicRefs, + make: initClassicRefs, }, { name: "nhcb", - 
init: initNHCBRefs, + make: initNHCBRefs, }, } { for _, labelCount := range []int{0, 10, 50} { for _, histograms := range []int{10, 100, 1000} { for _, buckets := range []int{0, 1, 10, 100} { b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) { - resetCache() - maker.init(labelCount, histograms, buckets) + series, samples, nhcbs := maker.make(labelCount, histograms, buckets) enc := Encoder{} for range b.N { var buf []byte From cb7254158b335e9c7110d7a05bbf2dd2b21b98fb Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Thu, 2 Jan 2025 14:43:31 +0000 Subject: [PATCH 1191/1397] feat: rename status to provisioning_status and add operating_status Signed-off-by: Paulo Dias --- discovery/openstack/loadbalancer.go | 20 +++--- discovery/openstack/loadbalancer_test.go | 78 +++++++++++++----------- docs/configuration/configuration.md | 2 + 3 files changed, 54 insertions(+), 46 deletions(-) diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go index 7bdf1d2fdf..b3c05fada5 100644 --- a/discovery/openstack/loadbalancer.go +++ b/discovery/openstack/loadbalancer.go @@ -33,14 +33,15 @@ import ( ) const ( - openstackLabelLoadBalancerID = openstackLabelPrefix + "loadbalancer_id" - openstackLabelLoadBalancerName = openstackLabelPrefix + "loadbalancer_name" - openstackLabelLoadBalancerStatus = openstackLabelPrefix + "loadbalancer_status" - openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone" - openstackLabelLoadBalancerFloatingIP = openstackLabelPrefix + "loadbalancer_floating_ip" - openstackLabelLoadBalancerVIP = openstackLabelPrefix + "loadbalancer_vip" - openstackLabelLoadBalancerProvider = openstackLabelPrefix + "loadbalancer_provider" - openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags" + openstackLabelLoadBalancerID = openstackLabelPrefix + "loadbalancer_id" + openstackLabelLoadBalancerName = 
openstackLabelPrefix + "loadbalancer_name" + openstackLabelLoadBalancerOperatingStatus = openstackLabelPrefix + "loadbalancer_operating_status" + openstackLabelLoadBalancerProvisioningStatus = openstackLabelPrefix + "loadbalancer_provisioning_status" + openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone" + openstackLabelLoadBalancerFloatingIP = openstackLabelPrefix + "loadbalancer_floating_ip" + openstackLabelLoadBalancerVIP = openstackLabelPrefix + "loadbalancer_vip" + openstackLabelLoadBalancerProvider = openstackLabelPrefix + "loadbalancer_provider" + openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags" ) // LoadBalancerDiscovery discovers OpenStack load balancers. @@ -174,7 +175,8 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro labels[model.AddressLabel] = model.LabelValue(addr) labels[openstackLabelLoadBalancerID] = model.LabelValue(lb.ID) labels[openstackLabelLoadBalancerName] = model.LabelValue(lb.Name) - labels[openstackLabelLoadBalancerStatus] = model.LabelValue(lb.ProvisioningStatus) + labels[openstackLabelLoadBalancerOperatingStatus] = model.LabelValue(lb.OperatingStatus) + labels[openstackLabelLoadBalancerProvisioningStatus] = model.LabelValue(lb.ProvisioningStatus) labels[openstackLabelLoadBalancerAvailabilityZone] = model.LabelValue(lb.AvailabilityZone) labels[openstackLabelLoadBalancerVIP] = model.LabelValue(lb.VipAddress) labels[openstackLabelLoadBalancerProvider] = model.LabelValue(lb.Provider) diff --git a/discovery/openstack/loadbalancer_test.go b/discovery/openstack/loadbalancer_test.go index 45c9d1024d..c0719437d7 100644 --- a/discovery/openstack/loadbalancer_test.go +++ b/discovery/openstack/loadbalancer_test.go @@ -70,49 +70,53 @@ func TestOpenstackSDLoadBalancerRefresh(t *testing.T) { for i, lbls := range []model.LabelSet{ { - "__address__": model.LabelValue("10.0.0.32:9273"), - "__meta_openstack_loadbalancer_id": 
model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), - "__meta_openstack_loadbalancer_name": model.LabelValue("lb1"), - "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), - "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), - "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"), - "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"), - "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), - "__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"), - "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), + "__address__": model.LabelValue("10.0.0.32:9273"), + "__meta_openstack_loadbalancer_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb1"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"), + "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), }, { - "__address__": model.LabelValue("10.0.2.78:8080"), - "__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"), - "__meta_openstack_loadbalancer_name": model.LabelValue("lb3"), - "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), - "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"), - "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"), - "__meta_openstack_loadbalancer_vip": 
model.LabelValue("10.0.2.78"), - "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), - "__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"), - "__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"), + "__address__": model.LabelValue("10.0.2.78:8080"), + "__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb3"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.2.78"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"), + "__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"), }, { - "__address__": model.LabelValue("10.0.3.99:9090"), - "__meta_openstack_loadbalancer_id": model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"), - "__meta_openstack_loadbalancer_name": model.LabelValue("lb4"), - "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), - "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), - "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"), - "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.3.99"), - "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), - "__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"), + "__address__": model.LabelValue("10.0.3.99:9090"), + "__meta_openstack_loadbalancer_id": model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb4"), + 
"__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"), + "__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.3.99"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"), }, { - "__address__": model.LabelValue("10.0.4.88:9876"), - "__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"), - "__meta_openstack_loadbalancer_name": model.LabelValue("lb5"), - "__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"), - "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"), - "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"), - "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), - "__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"), + "__address__": model.LabelValue("10.0.4.88:9876"), + "__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"), + "__meta_openstack_loadbalancer_name": model.LabelValue("lb5"), + "__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"), + "__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"), + "__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"), + "__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"), + "__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"), + "__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"), }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 
3749681977..99146254fb 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1209,6 +1209,8 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_loadbalancer_name`: the OpenStack load balancer name. * `__meta_openstack_loadbalancer_provider`: the Octavia provider of the OpenStack load balancer. * `__meta_openstack_loadbalancer_status`: the status of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_operating_status`: the operating status of the OpenStack load balancer. +* `__meta_openstack_loadbalancer_provisioning_status`: the provisioning status of the OpenStack load balancer. * `__meta_openstack_loadbalancer_tags`: comma separated list of the OpenStack load balancer. * `__meta_openstack_loadbalancer_vip`: the VIP of the OpenStack load balancer. * `__meta_openstack_project_id`: the project (tenant) owning this load balancer. From cfcb00a716f2dfa286a8f1a479ea7d2790277acd Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 2 Jan 2025 15:51:52 +0100 Subject: [PATCH 1192/1397] perf(nhcbparse): unroll recursion (#15776) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://github.com/prometheus/prometheus/pull/15467#issuecomment-2563585979 Signed-off-by: György Krajcsovits --- model/textparse/nhcbparse.go | 94 ++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 46 deletions(-) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index ff756965f4..83e381539f 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -177,61 +177,63 @@ func (p *NHCBParser) CreatedTimestamp() *int64 { } func (p *NHCBParser) Next() (Entry, error) { - if p.state == stateEmitting { - p.state = stateStart - if p.entry == EntrySeries { - isNHCB := p.handleClassicHistogramSeries(p.lset) - if isNHCB && !p.keepClassicHistograms { - // Do not return the classic histogram series if 
it was converted to NHCB and we are not keeping classic histograms. - return p.Next() + for { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue + } } + return p.entry, p.err } - return p.entry, p.err - } - p.entry, p.err = p.parser.Next() - if p.err != nil { - if errors.Is(p.err, io.EOF) && p.processNHCB() { - return EntryHistogram, nil - } - return EntryInvalid, p.err - } - switch p.entry { - case EntrySeries: - p.bytes, p.ts, p.value = p.parser.Series() - p.metricString = p.parser.Metric(&p.lset) - // Check the label set to see if we can continue or need to emit the NHCB. - var isNHCB bool - if p.compareLabels() { - // Labels differ. Check if we can emit the NHCB. - if p.processNHCB() { + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { return EntryHistogram, nil } - isNHCB = p.handleClassicHistogramSeries(p.lset) - } else { - // Labels are the same. Check if after an exponential histogram. - if p.lastHistogramExponential { - isNHCB = false - } else { - isNHCB = p.handleClassicHistogramSeries(p.lset) - } + return EntryInvalid, p.err } - if isNHCB && !p.keepClassicHistograms { - // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. - return p.Next() + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.metricString = p.parser.Metric(&p.lset) + // Check the label set to see if we can continue or need to emit the NHCB. + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. + if p.processNHCB() { + return EntryHistogram, nil + } + isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. 
Check if after an exponential histogram. + if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue + } + return p.entry, p.err + case EntryHistogram: + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() + } + if p.processNHCB() { + return EntryHistogram, nil } return p.entry, p.err - case EntryHistogram: - p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() - p.metricString = p.parser.Metric(&p.lset) - p.storeExponentialLabels() - case EntryType: - p.bName, p.typ = p.parser.Type() } - if p.processNHCB() { - return EntryHistogram, nil - } - return p.entry, p.err } // Return true if labels have changed and we should emit the NHCB. From a6947a03692f825fcf907be3ef8e3dfb8f47d7b2 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 3 Jan 2025 14:28:51 +0000 Subject: [PATCH 1193/1397] Merge 3.1 into main (#15775) Signed-off-by: Bryan Boreham --- CHANGELOG.md | 54 ++++++++++++++++++-- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 +- web/ui/module/codemirror-promql/package.json | 4 +- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 ++--- web/ui/package.json | 2 +- 7 files changed, 63 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a75e163ec0..d0a7ef6611 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,53 @@ ## unreleased -* [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428 -* [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416 -* [BUGFIX] OTLP receiver: Allow colons in non-standard units. 
#15710 +## 3.1.0 / 2025-01-02 + + * [SECURITY] upgrade golang.org/x/crypto to address reported CVE-2024-45337. #15691 + * [CHANGE] Notifier: Increment prometheus_notifications_errors_total by the number of affected alerts rather than per batch. #15428 + * [CHANGE] API: list rules field "groupNextToken:omitempty" renamed to "groupNextToken". #15400 + * [ENHANCEMENT] OTLP translate: keep identifying attributes in target_info. #15448 + * [ENHANCEMENT] Paginate rule groups, add infinite scroll to rules within groups. #15677 + * [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880 + * [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672 + * [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339 + * [ENHANCEMEN] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329 + * [PERF] Optimize `l=~".+"` matcher. #15474, #15684 + * [PERF] TSDB: Cache all symbols for compaction . #15455 + * [PERF] TSDB: MemPostings: keep a map of label values slices. #15426 + * [PERF] Remote-Write: Remove interning hook. #15456 + * [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453 + * [PERF] TSDB: reduce memory allocations. #15465, #15427 + * [PERF] Storage: Implement limit in mergeGenericQuerier. #14489 + * [PERF] TSDB: Optimize inverse matching. #14144 + * [PERF] Regex: use stack memory for lowercase copy of string. #15210 + * [PERF] TSDB: When deleting from postings index, pause to unlock and let readers read. #15242 + * [BUGFIX] Main: Avoid possible segfault at exit. (#15724) + * [BUGFIX] Rules: Do not run rules concurrently if uncertain about dependencies. #15560 + * [BUGFIX] PromQL: Adds test for `absent`, `absent_over_time` and `deriv` func with histograms. #15667 + * [BUGFIX] PromQL: Fix various bugs related to quoting UTF-8 characters. #15531 + * [BUGFIX] Scrape: fix nil panic after scrape loop reload. 
#15563 + * [BUGFIX] Remote-write: fix panic on repeated log message. #15562 + * [BUGFIX] Scrape: reload would ignore always_scrape_classic_histograms and convert_classic_histograms_to_nhcb configs. #15489 + * [BUGFIX] TSDB: fix data corruption in experimental native histograms. #15482 + * [BUGFIX] PromQL: Ignore histograms in all time related functions. #15479 + * [BUGFIX] OTLP receiver: Convert metric metadata. #15416 + * [BUGFIX] PromQL: Fix `resets` function for histograms. #15527 + * [BUGFIX] PromQL: Fix behaviour of `changes()` for mix of histograms and floats. #15469 + * [BUGFIX] PromQL: Fix behaviour of some aggregations with histograms. #15432 + * [BUGFIX] allow quoted exemplar keys in openmetrics text format. #15260 + * [BUGFIX] TSDB: fixes for rare conditions when loading write-behind-log (WBL). #15380 + * [BUGFIX] `round()` function did not remove `__name__` label. #15250 + * [BUGFIX] Promtool: analyze block shows metric name with 0 cardinality. #15438 + * [BUGFIX] PromQL: Fix `count_values` for histograms. #15422 + * [BUGFIX] PromQL: fix issues with comparison binary operations with `bool` modifier and native histograms. #15413 + * [BUGFIX] PromQL: fix incorrect "native histogram ignored in aggregation" annotations. #15414 + * [BUGFIX] PromQL: Corrects the behaviour of some operator and aggregators with Native Histograms. #15245 + * [BUGFIX] TSDB: Always return unknown hint for first sample in non-gauge histogram chunk. #15343 + * [BUGFIX] PromQL: Clamp functions: Ignore any points with native histograms. #15169 + * [BUGFIX] TSDB: Fix race on stale values in headAppender. #15322 + * [BUGFIX] UI: Fix selector / series formatting for empty metric names. #15340 + * [BUGFIX] OTLP receiver: Allow colons in non-standard units. #15710 ## 3.0.1 / 2024-11-28 @@ -37,14 +81,14 @@ This release includes new features such as a brand new UI and UTF-8 support enab * [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. 
a sample coinciding with the lower time limit is excluded rather than included. #13904 * [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365 * [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365 -* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705 +* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705, #15258 * [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807 * [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770 * [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643 * [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384 -* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. 
#14769 +* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769, #15011 * [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710 * [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196 * [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694 diff --git a/VERSION b/VERSION index cb2b00e4f7..fd2a01863f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.1 +3.1.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index a8c7ebd417..c3f1e0fbfd 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.300.1", + "version": "0.301.0", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.1", + "@prometheus-io/codemirror-promql": "0.301.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.24.0", "@tanstack/react-query": "^5.62.7", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index c10ebf6115..d4e95c7ee8 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.1", + "version": "0.301.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.1", + "@prometheus-io/lezer-promql": "0.301.0", "lru-cache": "^11.0.2" }, "devDependencies": { diff --git 
a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index bb7af294d9..282ab5ab62 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.300.1", + "version": "0.301.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a73f55a49f..6db3035c27 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.300.1", + "version": "0.301.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.300.1", + "version": "0.301.0", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.300.1", + "version": "0.301.0", "dependencies": { "@codemirror/autocomplete": "^6.18.3", "@codemirror/language": "^6.10.6", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.300.1", + "@prometheus-io/codemirror-promql": "0.301.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.24.0", "@tanstack/react-query": "^5.62.7", @@ -147,10 +147,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.300.1", + "version": "0.301.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.300.1", + "@prometheus-io/lezer-promql": "0.301.0", "lru-cache": "^11.0.2" }, "devDependencies": { @@ -180,7 +180,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.300.1", + "version": "0.301.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.2", diff --git a/web/ui/package.json b/web/ui/package.json index bfebd64bd5..62b3f26246 100644 --- 
a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.300.1", + "version": "0.301.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", From 4f67a38a390224b4f501c2f139daabeeaa49792b Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Fri, 3 Jan 2025 17:58:02 +0100 Subject: [PATCH 1194/1397] template: Use cases.Title instead of deprecated strings.Title (#15721) Signed-off-by: Arve Knudsen --- docs/configuration/template_reference.md | 2 +- template/template.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/configuration/template_reference.md b/docs/configuration/template_reference.md index 47df9d1e09..ec4b31376c 100644 --- a/docs/configuration/template_reference.md +++ b/docs/configuration/template_reference.md @@ -68,7 +68,7 @@ versions. | Name | Arguments | Returns | Notes | | ------------- | ------------- | ------- | ----------- | -| title | string | string | [strings.Title](https://golang.org/pkg/strings/#Title), capitalises first character of each word.| +| title | string | string | [cases.Title](https://pkg.go.dev/golang.org/x/text/cases#Title), capitalises first character of each word.| | toUpper | string | string | [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper), converts all characters to upper case.| | toLower | string | string | [strings.ToLower](https://golang.org/pkg/strings/#ToLower), converts all characters to lower case.| | stripPort | string | string | [net.SplitHostPort](https://pkg.go.dev/net#SplitHostPort), splits string into host and port, then returns only host.| diff --git a/template/template.go b/template/template.go index 0698c6c8ac..25b65eb577 100644 --- a/template/template.go +++ b/template/template.go @@ -30,6 +30,8 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "golang.org/x/text/cases" + 
"golang.org/x/text/language" common_templates "github.com/prometheus/common/helpers/templates" @@ -166,7 +168,7 @@ func NewTemplateExpander( return html_template.HTML(text) }, "match": regexp.MatchString, - "title": strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package. + "title": cases.Title(language.AmericanEnglish, cases.NoLower).String, "toUpper": strings.ToUpper, "toLower": strings.ToLower, "graphLink": strutil.GraphLinkForExpression, From 56094197b598b56434c5d8c32b176f115821cc3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A9lia=20Barroso?= <66432275+heliapb@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:13:17 +0000 Subject: [PATCH 1195/1397] [Docs] Note that scrape_timeout cannot be greater than scrape_interval (#15786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Hélia Barroso --- docs/configuration/configuration.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 57f4013936..168c99d3ca 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -59,6 +59,7 @@ global: [ scrape_interval: | default = 1m ] # How long until a scrape request times out. + # It cannot be greater than the scrape interval. [ scrape_timeout: | default = 10s ] # The protocols to negotiate during a scrape with the client. @@ -221,6 +222,7 @@ job_name: [ scrape_interval: | default = ] # Per-scrape timeout when scraping this job. +# It cannot be greater than the scrape interval. [ scrape_timeout: | default = ] # The protocols to negotiate during a scrape with the client. 
From 5fdec3140188808bb9c2bf48e94b8533dbf75ac5 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Mon, 6 Jan 2025 11:30:39 -0300 Subject: [PATCH 1196/1397] otlp/translator: Use separate function for metric names with UTF8 characters (#15664) BuildCompliantName was renamed to BuildCompliantMetricName, and it no longer takes UTF8 support into consideration. It focuses on building a metric name that follows Prometheus conventions. A new function, BuildMetricName, was added to optionally add unit and type suffixes to OTLP metric names without translating any characters to underscores(_). --- .../prometheus/helpers_from_stdlib.go | 106 -------- ...rmalize_name.go => metric_name_builder.go} | 211 ++++++++------ .../prometheus/metric_name_builder_test.go | 257 ++++++++++++++++++ .../prometheus/normalize_name_test.go | 210 -------------- .../prometheusremotewrite/histograms_test.go | 2 +- .../prometheusremotewrite/metrics_to_prw.go | 7 +- .../metrics_to_prw_test.go | 2 +- 7 files changed, 391 insertions(+), 404 deletions(-) delete mode 100644 storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go rename storage/remote/otlptranslator/prometheus/{normalize_name.go => metric_name_builder.go} (56%) create mode 100644 storage/remote/otlptranslator/prometheus/metric_name_builder_test.go delete mode 100644 storage/remote/otlptranslator/prometheus/normalize_name_test.go diff --git a/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go b/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go deleted file mode 100644 index cb9257d073..0000000000 --- a/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go -// Provenance-includes-license: BSD-3-Clause -// Provenance-includes-copyright: Copyright The Go Authors. - -package prometheus - -import "strings" - -// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library, -// but it also returns the separators as part of the result. -func fieldsFunc(s string, f func(rune) bool) ([]string, []string) { - // A span is used to record a slice of s of the form s[start:end]. - // The start index is inclusive and the end index is exclusive. - type span struct { - start int - end int - } - spans := make([]span, 0, 32) - separators := make([]string, 0, 32) - - // Find the field start and end indices. - // Doing this in a separate pass (rather than slicing the string s - // and collecting the result substrings right away) is significantly - // more efficient, possibly due to cache effects. - start := -1 // valid span start if >= 0 - for end, rune := range s { - if f(rune) { - if start >= 0 { - spans = append(spans, span{start, end}) - // Set start to a negative value. - // Note: using -1 here consistently and reproducibly - // slows down this code by a several percent on amd64. - start = ^start - separators = append(separators, string(s[end])) - } - } else { - if start < 0 { - start = end - } - } - } - - // Last field might end at EOF. - if start >= 0 { - spans = append(spans, span{start, len(s)}) - } - - // Create strings from recorded field indices. 
- a := make([]string, len(spans)) - for i, span := range spans { - a[i] = s[span.start:span.end] - } - - return a, separators -} - -// join is a copy of strings.Join from the Go standard library, -// but it also accepts a slice of separators to join the elements with. -// If the slice of separators is shorter than the slice of elements, use a default value. -// We also don't check for integer overflow. -func join(elems []string, separators []string, def string) string { - switch len(elems) { - case 0: - return "" - case 1: - return elems[0] - } - - var n int - var sep string - sepLen := len(separators) - for i, elem := range elems { - if i >= sepLen { - sep = def - } else { - sep = separators[i] - } - n += len(sep) + len(elem) - } - - var b strings.Builder - b.Grow(n) - b.WriteString(elems[0]) - for i, s := range elems[1:] { - if i >= sepLen { - sep = def - } else { - sep = separators[i] - } - b.WriteString(sep) - b.WriteString(s) - } - return b.String() -} diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/metric_name_builder.go similarity index 56% rename from storage/remote/otlptranslator/prometheus/normalize_name.go rename to storage/remote/otlptranslator/prometheus/metric_name_builder.go index 580c72b548..8b5ea2a046 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/metric_name_builder.go @@ -78,7 +78,7 @@ var perUnitMap = map[string]string{ "y": "year", } -// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric. +// BuildCompliantMetricName builds a Prometheus-compliant metric name for the specified metric. // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. 
Make sure specified namespace follows Prometheus @@ -87,29 +87,24 @@ var perUnitMap = map[string]string{ // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming // and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string { +func BuildCompliantMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { - return normalizeName(metric, namespace, allowUTF8) + return normalizeName(metric, namespace) } - var metricName string - if !allowUTF8 { - // Simple case (no full normalization, no units, etc.). - metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") - } else { - metricName = metric.Name() - } + // Simple case (no full normalization, no units, etc.). + metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 { + // Metric name starts with a digit? Prefix it with an underscore. + if metricName != "" && unicode.IsDigit(rune(metricName[0])) { metricName = "_" + metricName } @@ -124,70 +119,17 @@ var ( ) // Build a normalized name for the specified metric. 
-func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { - var nameTokens []string - var separators []string - if !allowUTF8 { - // Split metric name into "tokens" (of supported metric name runes). - // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. - // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - nameTokens = strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, - ) - } else { - translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } - // Split metric name into "tokens" (of supported metric name runes). - nameTokens, separators = fieldsFunc(metric.Name(), translationFunc) - } +func normalizeName(metric pmetric.Metric, namespace string) string { + // Split metric name into "tokens" (of supported metric name runes). + // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. + // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
+ nameTokens := strings.FieldsFunc( + metric.Name(), + func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, + ) - // Split unit at the '/' if any - unitTokens := strings.SplitN(metric.Unit(), "/", 2) - - // Main unit - // Append if not blank, doesn't contain '{}', and is not present in metric name already - if len(unitTokens) > 0 { - var mainUnitProm, perUnitProm string - mainUnitOTel := strings.TrimSpace(unitTokens[0]) - if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm = unitMapGetOrDefault(mainUnitOTel) - if !allowUTF8 { - mainUnitProm = cleanUpUnit(mainUnitProm) - } - if slices.Contains(nameTokens, mainUnitProm) { - mainUnitProm = "" - } - } - - // Per unit - // Append if not blank, doesn't contain '{}', and is not present in metric name already - if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOTel := strings.TrimSpace(unitTokens[1]) - if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm = perUnitMapGetOrDefault(perUnitOTel) - if !allowUTF8 { - perUnitProm = cleanUpUnit(perUnitProm) - } - } - if perUnitProm != "" { - perUnitProm = "per_" + perUnitProm - if slices.Contains(nameTokens, perUnitProm) { - perUnitProm = "" - } - } - } - - if perUnitProm != "" { - mainUnitProm = strings.TrimSuffix(mainUnitProm, "_") - } - - if mainUnitProm != "" { - nameTokens = append(nameTokens, mainUnitProm) - } - if perUnitProm != "" { - nameTokens = append(nameTokens, perUnitProm) - } - } + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) + nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix)) // Append _total for Counters if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { @@ -208,14 +150,8 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri nameTokens = append([]string{namespace}, nameTokens...) 
} - var normalizedName string - if !allowUTF8 { - // Build the string from the tokens, separated with underscores - normalizedName = strings.Join(nameTokens, "_") - } else { - // Build the string from the tokens + separators. - normalizedName = join(nameTokens, separators, "_") - } + // Build the string from the tokens, separated with underscores + normalizedName := strings.Join(nameTokens, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { @@ -225,6 +161,39 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri return normalizedName } +// addUnitTokens will add the suffixes to the nameTokens if they are not already present. +// It will also remove trailing underscores from the main suffix to avoid double underscores +// when joining the tokens. +// +// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just +// 'per_', it will be entirely removed. +func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string { + if slices.Contains(nameTokens, mainUnitSuffix) { + mainUnitSuffix = "" + } + + if perUnitSuffix == "per_" { + perUnitSuffix = "" + } else { + perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_") + if slices.Contains(nameTokens, perUnitSuffix) { + perUnitSuffix = "" + } + } + + if perUnitSuffix != "" { + mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_") + } + + if mainUnitSuffix != "" { + nameTokens = append(nameTokens, mainUnitSuffix) + } + if perUnitSuffix != "" { + nameTokens = append(nameTokens, perUnitSuffix) + } + return nameTokens +} + // cleanUpUnit cleans up unit so it matches model.LabelNameRE. func cleanUpUnit(unit string) string { // Multiple consecutive underscores are replaced with a single underscore. 
@@ -263,3 +232,75 @@ func removeItem(slice []string, value string) []string { } return newSlice } + +// BuildMetricName builds a valid metric name but without following Prometheus naming conventions. +// It doesn't do any character transformation, it only prefixes the metric name with the namespace, if any, +// and adds metric type suffixes, e.g. "_total" for counters and unit suffixes. +// +// Differently from BuildCompliantMetricName, it doesn't check for the presence of unit and type suffixes. +// If "addMetricSuffixes" is true, it will add them anyway. +// +// Please use BuildCompliantMetricName for a metric name that follows Prometheus naming conventions. +func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + metricName := metric.Name() + + if namespace != "" { + metricName = namespace + "_" + metricName + } + + if addMetricSuffixes { + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) + if mainUnitSuffix != "" { + metricName = metricName + "_" + mainUnitSuffix + } + if perUnitSuffix != "" { + metricName = metricName + "_" + perUnitSuffix + } + + // Append _total for Counters + if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { + metricName = metricName + "_total" + } + + // Append _ratio for metrics with unit "1" + // Some OTel receivers improperly use unit "1" for counters of objects + // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions + // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY + // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) + if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { + metricName = metricName + "_ratio" + } + } + return metricName +} + +// buildUnitSuffixes builds the main and per unit suffixes for the specified unit +// but doesn't do any special character 
transformation to accommodate Prometheus naming conventions. +// Removing trailing underscores or appending suffixes is done in the caller. +func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) { + // Split unit at the '/' if any + unitTokens := strings.SplitN(unit, "/", 2) + + if len(unitTokens) > 0 { + // Main unit + // Update if not blank and doesn't contain '{}' + mainUnitOTel := strings.TrimSpace(unitTokens[0]) + if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { + mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel) + } + + // Per unit + // Update if not blank and doesn't contain '{}' + if len(unitTokens) > 1 && unitTokens[1] != "" { + perUnitOTel := strings.TrimSpace(unitTokens[1]) + if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { + perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel) + } + if perUnitSuffix != "" { + perUnitSuffix = "per_" + perUnitSuffix + } + } + } + + return mainUnitSuffix, perUnitSuffix +} diff --git a/storage/remote/otlptranslator/prometheus/metric_name_builder_test.go b/storage/remote/otlptranslator/prometheus/metric_name_builder_test.go new file mode 100644 index 0000000000..1c4a6124c4 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/metric_name_builder_test.go @@ -0,0 +1,257 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. + +package prometheus + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestByte(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) +} + +func TestByteCounter(t *testing.T) { + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) +} + +func TestWhiteSpaces(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) +} + +func TestNonStandardUnit(t *testing.T) { + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) + // The normal metric name character set is allowed in non-standard units. 
+ require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), "")) +} + +func TestNonStandardUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) +} + +func TestBrokenUnit(t *testing.T) { + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) +} + +func TestBrokenUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) +} + +func TestRatio(t *testing.T) { + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) +} + +func TestHertz(t *testing.T) { + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) +} + +func TestPer(t *testing.T) { + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) + require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) + // The normal metric name character set is allowed in non-standard 
units. + require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), "")) + + t.Run("invalid per unit", func(t *testing.T) { + require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), "")) + }) +} + +func TestPercent(t *testing.T) { + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) +} + +func TestEmpty(t *testing.T) { + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "")) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) +} + +func TestOTelReceivers(t *testing.T) { + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) + require.Equal(t, 
"apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) + require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) +} + 
+func TestNamespace(t *testing.T) { + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) +} + +func TestCleanUpUnit(t *testing.T) { + require.Equal(t, "", cleanUpUnit("")) + require.Equal(t, "a_b", cleanUpUnit("a b")) + require.Equal(t, "hello_world", cleanUpUnit("hello, world")) + require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2")) + require.Equal(t, "1000", cleanUpUnit("$1000")) + require.Equal(t, "", cleanUpUnit("*+$^=)")) +} + +func TestUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", unitMapGetOrDefault("")) + require.Equal(t, "seconds", unitMapGetOrDefault("s")) + require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) +} + +func TestPerUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", perUnitMapGetOrDefault("")) + require.Equal(t, "second", perUnitMapGetOrDefault("s")) + require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) +} + +func TestBuildUnitSuffixes(t *testing.T) { + tests := []struct { + unit string + expectedMain string + expectedPer string + }{ + {"", "", ""}, + {"s", "seconds", ""}, + {"By/s", "bytes", "per_second"}, + {"requests/m", "requests", "per_minute"}, + {"{invalid}/second", "", "per_second"}, + {"bytes/{invalid}", "bytes", ""}, + } + + for _, test := range tests { + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(test.unit) + require.Equal(t, test.expectedMain, mainUnitSuffix) + require.Equal(t, test.expectedPer, perUnitSuffix) + } +} + +func TestAddUnitTokens(t *testing.T) { + tests := []struct { + nameTokens []string + mainUnitSuffix string + perUnitSuffix string + expected []string + }{ + {[]string{}, "", "", []string{}}, + {[]string{"token1"}, "main", "", []string{"token1", "main"}}, + {[]string{"token1"}, "", "per", []string{"token1", "per"}}, + {[]string{"token1"}, "main", "per", []string{"token1", "main", "per"}}, + {[]string{"token1", "per"}, "main", "per", []string{"token1", 
"per", "main"}}, + {[]string{"token1", "main"}, "main", "per", []string{"token1", "main", "per"}}, + {[]string{"token1"}, "main_", "per", []string{"token1", "main", "per"}}, + {[]string{"token1"}, "main_unit", "per_seconds_", []string{"token1", "main_unit", "per_seconds"}}, // trailing underscores are removed + {[]string{"token1"}, "main_unit", "per_", []string{"token1", "main_unit"}}, // 'per_' is removed entirely + } + + for _, test := range tests { + result := addUnitTokens(test.nameTokens, test.mainUnitSuffix, test.perUnitSuffix) + require.Equal(t, test.expected, result) + } +} + +func TestRemoveItem(t *testing.T) { + require.Equal(t, []string{}, removeItem([]string{}, "test")) + require.Equal(t, []string{}, removeItem([]string{}, "")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "")) + require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) + require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) + require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a")) +} + +func TestBuildCompliantMetricNameWithSuffixes(t *testing.T) { + require.Equal(t, "system_io_bytes_total", BuildCompliantMetricName(createCounter("system.io", "By"), "", true)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantMetricName(createCounter("network.io", "By"), "system", true)) + require.Equal(t, "_3_14_digits", BuildCompliantMetricName(createGauge("3.14 digits", ""), "", true)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) + require.Equal(t, ":foo::bar", BuildCompliantMetricName(createGauge(":foo::bar", ""), "", true)) + require.Equal(t, ":foo::bar_total", BuildCompliantMetricName(createCounter(":foo::bar", ""), "", true)) + // Gauges with unit 1 are considered ratios. 
+ require.Equal(t, "foo_bar_ratio", BuildCompliantMetricName(createGauge("foo.bar", "1"), "", true)) + // Slashes in units are converted. + require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantMetricName(createCounter("system.io", "foo/bar"), "", true)) + require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) + // Removes non aplhanumerical characters from units, but leaves colons. + require.Equal(t, "temperature_:C", BuildCompliantMetricName(createGauge("temperature", "%*()°:C"), "", true)) +} + +func TestBuildCompliantMetricNameWithoutSuffixes(t *testing.T) { + require.Equal(t, "system_io", BuildCompliantMetricName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "system_network_io", BuildCompliantMetricName(createCounter("network.io", "By"), "system", false)) + require.Equal(t, "system_network_I_O", BuildCompliantMetricName(createCounter("network (I/O)", "By"), "system", false)) + require.Equal(t, "_3_14_digits", BuildCompliantMetricName(createGauge("3.14 digits", "By"), "", false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) + require.Equal(t, ":foo::bar", BuildCompliantMetricName(createGauge(":foo::bar", ""), "", false)) + require.Equal(t, ":foo::bar", BuildCompliantMetricName(createCounter(":foo::bar", ""), "", false)) + require.Equal(t, "foo_bar", BuildCompliantMetricName(createGauge("foo.bar", "1"), "", false)) + require.Equal(t, "system_io", BuildCompliantMetricName(createCounter("system.io", "foo/bar"), "", false)) + require.Equal(t, "metric_with___foreign_characters", BuildCompliantMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) +} + +func TestBuildMetricNameWithSuffixes(t *testing.T) { + require.Equal(t, "system.io_bytes_total", BuildMetricName(createCounter("system.io", "By"), "", true)) + 
require.Equal(t, "system_network.io_bytes_total", BuildMetricName(createCounter("network.io", "By"), "system", true)) + require.Equal(t, "3.14 digits", BuildMetricName(createGauge("3.14 digits", ""), "", true)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) + require.Equal(t, ":foo::bar", BuildMetricName(createGauge(":foo::bar", ""), "", true)) + require.Equal(t, ":foo::bar_total", BuildMetricName(createCounter(":foo::bar", ""), "", true)) + // Gauges with unit 1 are considered ratios. + require.Equal(t, "foo.bar_ratio", BuildMetricName(createGauge("foo.bar", "1"), "", true)) + // Slashes in units are converted. + require.Equal(t, "system.io_foo_per_bar_total", BuildMetricName(createCounter("system.io", "foo/bar"), "", true)) + require.Equal(t, "metric_with_字符_foreign_characters_total", BuildMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) + require.Equal(t, "temperature_%*()°C", BuildMetricName(createGauge("temperature", "%*()°C"), "", true)) // Keeps the all characters in unit + // Tests below show weird interactions that users can have with the metric names. + // With BuildMetricName we don't check if units/type suffixes are already present in the metric name, we always add them. 
+ require.Equal(t, "system_io_seconds_seconds", BuildMetricName(createGauge("system_io_seconds", "s"), "", true)) + require.Equal(t, "system_io_total_total", BuildMetricName(createCounter("system_io_total", ""), "", true)) +} + +func TestBuildMetricNameWithoutSuffixes(t *testing.T) { + require.Equal(t, "system.io", BuildMetricName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "system_network.io", BuildMetricName(createCounter("network.io", "By"), "system", false)) + require.Equal(t, "3.14 digits", BuildMetricName(createGauge("3.14 digits", ""), "", false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) + require.Equal(t, ":foo::bar", BuildMetricName(createGauge(":foo::bar", ""), "", false)) + require.Equal(t, ":foo::bar", BuildMetricName(createCounter(":foo::bar", ""), "", false)) + // Gauges with unit 1 are considered ratios. + require.Equal(t, "foo.bar", BuildMetricName(createGauge("foo.bar", "1"), "", false)) + require.Equal(t, "metric_with_字符_foreign_characters", BuildMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) + require.Equal(t, "system_io_seconds", BuildMetricName(createGauge("system_io_seconds", "s"), "", false)) + require.Equal(t, "system_io_total", BuildMetricName(createCounter("system_io_total", ""), "", false)) +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go deleted file mode 100644 index 0473f6cbe1..0000000000 --- a/storage/remote/otlptranslator/prometheus/normalize_name_test.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheus - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "", false)) -} - -func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "", false)) - require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "", false)) -} - -func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "", false)) -} - -func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false)) - // The normal metric name character set is allowed in non-standard units. 
- require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), "", false)) -} - -func TestNonStandardUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "", false)) -} - -func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "", false)) - require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "", false)) - require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "", false)) -} - -func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "", false)) - require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "", false)) - require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "", false)) -} - -func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "", false)) - require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "", false)) - require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "", false)) -} - -func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "", false)) -} - -func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false)) - require.Equal(t, "astro_light_speed_limit_meters_per_second", 
normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false)) - // The normal metric name character set is allowed in non-standard units. - require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), "", false)) - - t.Run("invalid per unit", func(t *testing.T) { - require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), "", false)) - }) -} - -func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "", false)) - require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "", false)) -} - -func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "", false)) - require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "", false)) -} - -func TestAllowUTF8(t *testing.T) { - t.Run("allow UTF8", func(t *testing.T) { - require.Equal(t, "unsupported.metric.temperature_°F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", true)) - require.Equal(t, "unsupported.metric.weird_+=.:,!* & #", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", true)) - require.Equal(t, "unsupported.metric.redundant___test $_per_°C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", true)) - require.Equal(t, "metric_with_字符_foreign_characters_ど", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", true)) - }) - t.Run("disallow UTF8", func(t *testing.T) { - require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false)) - require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.,!* & #"), "", false)) - 
require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false)) - require.Equal(t, "metric_with_foreign_characters", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false)) - }) -} - -func TestAllowUTF8KnownBugs(t *testing.T) { - // Due to historical reasons, the translator code was copied from OpenTelemetry collector codebase. - // Over there, they tried to provide means to translate metric names following Prometheus conventions that are documented here: - // https://prometheus.io/docs/practices/naming/ - // - // Althogh not explicitly said, it was implied that words should be separated by a single underscore and the codebase was written - // with that in mind. - // - // Now that we're allowing OTel users to have their original names stored in prometheus without any transformation, we're facing problems - // where two (or more) UTF-8 characters are being used to separate words. - // TODO(arthursens): Fix it! - - // We're asserting on 'NotEqual', which proves the bug. - require.NotEqual(t, "metric....split_=+by_//utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) - // Here we're asserting on 'Equal', showing the current behavior. 
- require.Equal(t, "metric.split_by_utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true)) -} - -func TestOTelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "", false)) - require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "", false)) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "", false)) - require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "", false)) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "", false)) - require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "", false)) - require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "", false)) - require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "", false)) - require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "", false)) - require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "", false)) - require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "", false)) - require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "", false)) - require.Equal(t, 
"memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "", false)) - require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "", false)) - require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "", false)) - require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "", false)) - require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "", false)) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "", false)) - require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "", false)) - require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "", false)) - require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "", false)) - require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false)) -} - -func TestNamespace(t *testing.T) { - require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false)) - require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false)) -} - -func TestCleanUpUnit(t *testing.T) { - require.Equal(t, "", cleanUpUnit("")) - require.Equal(t, "a_b", cleanUpUnit("a b")) - require.Equal(t, "hello_world", cleanUpUnit("hello, world")) - require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2")) - require.Equal(t, "1000", cleanUpUnit("$1000")) - require.Equal(t, "", 
cleanUpUnit("*+$^=)")) -} - -func TestUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", unitMapGetOrDefault("")) - require.Equal(t, "seconds", unitMapGetOrDefault("s")) - require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) -} - -func TestPerUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", perUnitMapGetOrDefault("")) - require.Equal(t, "second", perUnitMapGetOrDefault("s")) - require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) -} - -func TestRemoveItem(t *testing.T) { - require.Equal(t, []string{}, removeItem([]string{}, "test")) - require.Equal(t, []string{}, removeItem([]string{}, "")) - require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) - require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "")) - require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) - require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) - require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a")) -} - -func TestBuildCompliantNameWithSuffixes(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true, false)) - require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true, false)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true, false)) - require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true, false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true, false)) - require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true, false)) - // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true, false)) - // Slashes in units are converted. 
- require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true, false)) - require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true, false)) -} - -func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false, false)) - require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false, false)) - require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false, false)) - require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false, false)) - require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false, false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false, false)) - require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false, false)) - require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false, false)) - require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false, false)) - require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false, false)) -} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index dcd83b7f93..520d571b65 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -762,7 +762,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { Settings{ ExportCreatedMetric: true, }, - 
prometheustranslator.BuildCompliantName(metric, "", true, true), + prometheustranslator.BuildCompliantMetricName(metric, "", true), ) require.NoError(t, err) require.Empty(t, annots) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 6779c9ed80..1545accf2f 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -96,7 +96,12 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + var promName string + if settings.AllowUTF8 { + promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } else { + promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } c.metadata = append(c.metadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: promName, diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index 05abc7743f..a3b4b08df4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -46,7 +46,7 @@ func TestFromMetrics(t *testing.T) { metricSlice := scopeMetricsSlice.At(j).Metrics() for k := 0; k < metricSlice.Len(); k++ { metric := metricSlice.At(k) - promName := prometheustranslator.BuildCompliantName(metric, "", false, false) + promName := prometheustranslator.BuildCompliantMetricName(metric, "", false) expMetadata = append(expMetadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: 
promName, From 816a5c94b9e0d50e6e55dca10bb096acc49a9325 Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Mon, 6 Jan 2025 15:15:17 +0000 Subject: [PATCH 1197/1397] fix: fix docs typo Signed-off-by: Paulo Dias --- docs/configuration/configuration.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 4d6716bb21..2b815b9d88 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1212,7 +1212,6 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_loadbalancer_id`: the OpenStack load balancer ID. * `__meta_openstack_loadbalancer_name`: the OpenStack load balancer name. * `__meta_openstack_loadbalancer_provider`: the Octavia provider of the OpenStack load balancer. -* `__meta_openstack_loadbalancer_status`: the status of the OpenStack load balancer. * `__meta_openstack_loadbalancer_operating_status`: the operating status of the OpenStack load balancer. * `__meta_openstack_loadbalancer_provisioning_status`: the provisioning status of the OpenStack load balancer. * `__meta_openstack_loadbalancer_tags`: comma separated list of the OpenStack load balancer. From 8067f2797198d7f43006c5ed5a3d60900d355cfa Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Mon, 6 Jan 2025 13:51:19 -0500 Subject: [PATCH 1198/1397] `RuleConcurrencyController`: Add `SplitGroupIntoBatches` method (#15681) * `RuleConcurrencyController`: Add `SplitGroupIntoBatches` method The concurrency implementation can now return a slice of concurrent rule batches This allows for additional concurrency as opposed to the current interface which is limited by the order in which the rules have been loaded Also, I removed the `concurrencyController` attribute from the group. That information is duplicated in the opts.RuleConcurrencyController` attribute, leading to some confusing behavior, especially in tests. 
Signed-off-by: Julien Duchesne * Address PR comments Signed-off-by: Julien Duchesne * Apply suggestions from code review Co-authored-by: gotjosh Signed-off-by: Julien Duchesne --------- Signed-off-by: Julien Duchesne Signed-off-by: Julien Duchesne Co-authored-by: gotjosh --- rules/fixtures/rules_chain.yaml | 22 ++++ .../rules_multiple_dependents_on_base.yaml | 21 ++++ rules/fixtures/rules_multiple_groups.yaml | 4 + rules/group.go | 86 ++++++------- rules/manager.go | 59 +++++++-- rules/manager_test.go | 114 ++++++++++++++++++ 6 files changed, 256 insertions(+), 50 deletions(-) create mode 100644 rules/fixtures/rules_chain.yaml create mode 100644 rules/fixtures/rules_multiple_dependents_on_base.yaml diff --git a/rules/fixtures/rules_chain.yaml b/rules/fixtures/rules_chain.yaml new file mode 100644 index 0000000000..00043b8d6f --- /dev/null +++ b/rules/fixtures/rules_chain.yaml @@ -0,0 +1,22 @@ +groups: + - name: chain + rules: + # Evaluated concurrently, no dependencies + - record: job:http_requests:rate1m + expr: sum by (job)(rate(http_requests_total[1m])) + - record: job:http_requests:rate5m + expr: sum by (job)(rate(http_requests_total[1m])) + + # Evaluated sequentially, dependents and dependencies + - record: job1:http_requests:rate1m + expr: job:http_requests:rate1m{job="job1"} + - record: job1_cluster1:http_requests:rate1m + expr: job1:http_requests:rate1m{cluster="cluster1"} + + # Evaluated concurrently, no dependents + - record: job1_cluster2:http_requests:rate1m + expr: job1:http_requests:rate1m{cluster="cluster2"} + - record: job1_cluster1_namespace1:http_requests:rate1m + expr: job1_cluster1:http_requests:rate1m{namespace="namespace1"} + - record: job1_cluster1_namespace2:http_requests:rate1m + expr: job1_cluster1:http_requests:rate1m{namespace="namespace2"} diff --git a/rules/fixtures/rules_multiple_dependents_on_base.yaml b/rules/fixtures/rules_multiple_dependents_on_base.yaml new file mode 100644 index 0000000000..40ef14de8c --- /dev/null +++ 
b/rules/fixtures/rules_multiple_dependents_on_base.yaml @@ -0,0 +1,21 @@ +groups: + - name: concurrent_dependents + rules: + # 3 dependents on the same base + - record: job:http_requests:rate1m + expr: sum by (job)(rate(http_requests_total[1m])) + - record: job1:http_requests:rate1m + expr: job:http_requests:rate1m{job="job1"} + - record: job2:http_requests:rate1m + expr: job:http_requests:rate1m{job="job2"} + - record: job3:http_requests:rate1m + expr: job:http_requests:rate1m{job="job3"} + # another 3 dependents on the same base + - record: job:http_requests:rate5m + expr: sum by (job)(rate(http_requests_total[5m])) + - record: job1:http_requests:rate5m + expr: job:http_requests:rate5m{job="job1"} + - record: job2:http_requests:rate5m + expr: job:http_requests:rate5m{job="job2"} + - record: job3:http_requests:rate5m + expr: job:http_requests:rate5m{job="job3"} diff --git a/rules/fixtures/rules_multiple_groups.yaml b/rules/fixtures/rules_multiple_groups.yaml index 87f31a6ca5..592219e981 100644 --- a/rules/fixtures/rules_multiple_groups.yaml +++ b/rules/fixtures/rules_multiple_groups.yaml @@ -6,6 +6,8 @@ groups: expr: sum by (job)(rate(http_requests_total[1m])) - record: job:http_requests:rate5m expr: sum by (job)(rate(http_requests_total[5m])) + - record: job:http_requests:rate10m + expr: sum by (job)(rate(http_requests_total[10m])) # dependents - record: job:http_requests:rate15m @@ -20,6 +22,8 @@ groups: expr: sum by (job)(rate(grpc_requests_total[1m])) - record: job:grpc_requests:rate5m expr: sum by (job)(rate(grpc_requests_total[5m])) + - record: job:grpc_requests:rate10m + expr: sum by (job)(rate(grpc_requests_total[10m])) # dependents - record: job:grpc_requests:rate15m diff --git a/rules/group.go b/rules/group.go index ecc96d0a12..cabb45abbb 100644 --- a/rules/group.go +++ b/rules/group.go @@ -74,9 +74,7 @@ type Group struct { // defaults to DefaultEvalIterationFunc. 
evalIterationFunc GroupEvalIterationFunc - // concurrencyController controls the rules evaluation concurrency. - concurrencyController RuleConcurrencyController - appOpts *storage.AppendOptions + appOpts *storage.AppendOptions } // GroupEvalIterationFunc is used to implement and extend rule group @@ -126,33 +124,27 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc = DefaultEvalIterationFunc } - concurrencyController := opts.RuleConcurrencyController - if concurrencyController == nil { - concurrencyController = sequentialRuleEvalController{} - } - if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } return &Group{ - name: o.Name, - file: o.File, - interval: o.Interval, - queryOffset: o.QueryOffset, - limit: o.Limit, - rules: o.Rules, - shouldRestore: o.ShouldRestore, - opts: opts, - seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), - done: make(chan struct{}), - managerDone: o.done, - terminated: make(chan struct{}), - logger: opts.Logger.With("file", o.File, "group", o.Name), - metrics: metrics, - evalIterationFunc: evalIterationFunc, - concurrencyController: concurrencyController, - appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, + name: o.Name, + file: o.File, + interval: o.Interval, + queryOffset: o.QueryOffset, + limit: o.Limit, + rules: o.Rules, + shouldRestore: o.ShouldRestore, + opts: opts, + seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), + done: make(chan struct{}), + managerDone: o.done, + terminated: make(chan struct{}), + logger: opts.Logger.With("file", o.File, "group", o.Name), + metrics: metrics, + evalIterationFunc: evalIterationFunc, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, } } @@ -647,25 +639,33 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { } var wg sync.WaitGroup - for i, rule := range g.rules { - select { - case <-g.done: - return - default: - } - - if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) { - wg.Add(1) - - go eval(i, 
rule, func() { - wg.Done() - ctrl.Done(ctx) - }) - } else { - eval(i, rule, nil) - } + ctrl := g.opts.RuleConcurrencyController + if ctrl == nil { + ctrl = sequentialRuleEvalController{} + } + for _, batch := range ctrl.SplitGroupIntoBatches(ctx, g) { + for _, ruleIndex := range batch { + select { + case <-g.done: + return + default: + } + + rule := g.rules[ruleIndex] + if len(batch) > 1 && ctrl.Allow(ctx, g, rule) { + wg.Add(1) + + go eval(ruleIndex, rule, func() { + wg.Done() + ctrl.Done(ctx) + }) + } else { + eval(ruleIndex, rule, nil) + } + } + // It is important that we finish processing any rules in this current batch - before we move into the next one. + wg.Wait() } - wg.Wait() g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load()) g.cleanupStaleSeries(ctx, ts) diff --git a/rules/manager.go b/rules/manager.go index edc67a832b..390742ce50 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -465,10 +465,17 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) { } } +// ConcurrentRules represents a slice of indexes of rules that can be evaluated concurrently. +type ConcurrentRules []int + // RuleConcurrencyController controls concurrency for rules that are safe to be evaluated concurrently. // Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus // server with additional query load. Concurrency is controlled globally, not on a per-group basis. type RuleConcurrencyController interface { + // SplitGroupIntoBatches returns an ordered slice of of ConcurrentRules, which are batches of rules that can be evaluated concurrently. + // The rules are represented by their index from the input rule group. + SplitGroupIntoBatches(ctx context.Context, group *Group) []ConcurrentRules + // Allow determines if the given rule is allowed to be evaluated concurrently. 
// If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done. // It is important that both *Group and Rule are not retained and only be used for the duration of the call. @@ -490,21 +497,51 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle } func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool { - // To allow a rule to be executed concurrently, we need 3 conditions: - // 1. The rule must not have any rules that depend on it. - // 2. The rule itself must not depend on any other rules. - // 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot. - if rule.NoDependentRules() && rule.NoDependencyRules() { - return c.sema.TryAcquire(1) + return c.sema.TryAcquire(1) +} + +func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { + // Using the rule dependency controller information (rules being identified as having no dependencies or no dependants), + // we can safely run the following concurrent groups: + // 1. Concurrently, all rules that have no dependencies + // 2. Sequentially, all rules that have both dependencies and dependants + // 3. 
Concurrently, all rules that have no dependants + + var noDependencies []int + var dependenciesAndDependants []int + var noDependants []int + + for i, r := range g.rules { + switch { + case r.NoDependencyRules(): + noDependencies = append(noDependencies, i) + case !r.NoDependentRules() && !r.NoDependencyRules(): + dependenciesAndDependants = append(dependenciesAndDependants, i) + case r.NoDependentRules(): + noDependants = append(noDependants, i) + } } - return false + var order []ConcurrentRules + if len(noDependencies) > 0 { + order = append(order, noDependencies) + } + for _, r := range dependenciesAndDependants { + order = append(order, []int{r}) + } + if len(noDependants) > 0 { + order = append(order, noDependants) + } + + return order } func (c *concurrentRuleEvalController) Done(_ context.Context) { c.sema.Release(1) } +var _ RuleConcurrencyController = &sequentialRuleEvalController{} + // sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially. type sequentialRuleEvalController struct{} @@ -512,6 +549,14 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) return false } +func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { + order := make([]ConcurrentRules, len(g.rules)) + for i := range g.rules { + order[i] = []int{i} + } + return order +} + func (c sequentialRuleEvalController) Done(_ context.Context) {} // FromMaps returns new sorted Labels from the given maps, overriding each other in order. 
diff --git a/rules/manager_test.go b/rules/manager_test.go index 94ee1e8b8b..df6f5fd1b4 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1987,6 +1987,15 @@ func TestAsyncRuleEvaluation(t *testing.T) { start := time.Now() DefaultEvalIterationFunc(ctx, group, start) + // Expected evaluation order + order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group) + require.Equal(t, []ConcurrentRules{ + {0}, + {1}, + {2}, + {3}, + }, order) + // Never expect more than 1 inflight query at a time. require.EqualValues(t, 1, maxInflight.Load()) // Each rule should take at least 1 second to execute sequentially. @@ -2065,6 +2074,12 @@ func TestAsyncRuleEvaluation(t *testing.T) { start := time.Now() DefaultEvalIterationFunc(ctx, group, start) + // Expected evaluation order (isn't affected by concurrency settings) + order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group) + require.Equal(t, []ConcurrentRules{ + {0, 1, 2, 3, 4, 5}, + }, order) + // Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals. require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load()) // Some rules should execute concurrently so should complete quicker. @@ -2104,6 +2119,12 @@ func TestAsyncRuleEvaluation(t *testing.T) { DefaultEvalIterationFunc(ctx, group, start) + // Expected evaluation order + order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group) + require.Equal(t, []ConcurrentRules{ + {0, 1, 2, 3, 4, 5}, + }, order) + // Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once. require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals) // Some rules should execute concurrently so should complete quicker. 
@@ -2153,6 +2174,99 @@ func TestAsyncRuleEvaluation(t *testing.T) { require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) } }) + + t.Run("asynchronous evaluation of rules that benefit from reordering", func(t *testing.T) { + t.Parallel() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + inflightQueries := atomic.Int32{} + maxInflight := atomic.Int32{} + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ruleCount := 8 + opts := optsFactory(storage, &maxInflight, &inflightQueries, 0) + + // Configure concurrency settings. + opts.ConcurrentEvalsEnabled = true + opts.MaxConcurrentEvals = int64(ruleCount) * 2 + opts.RuleConcurrencyController = nil + ruleManager := NewManager(opts) + + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...) + require.Empty(t, errs) + require.Len(t, groups, 1) + var group *Group + for _, g := range groups { + group = g + } + + start := time.Now() + + // Expected evaluation order + order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group) + require.Equal(t, []ConcurrentRules{ + {0, 4}, + {1, 2, 3, 5, 6, 7}, + }, order) + + group.Eval(ctx, start) + + // Inflight queries should be equal to 6. This is the size of the second batch of rules that can be executed concurrently. + require.EqualValues(t, 6, maxInflight.Load()) + // Some rules should execute concurrently so should complete quicker. + require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds()) + // Each rule produces one vector. 
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) + }) + + t.Run("attempted asynchronous evaluation of chained rules", func(t *testing.T) { + t.Parallel() + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + inflightQueries := atomic.Int32{} + maxInflight := atomic.Int32{} + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ruleCount := 7 + opts := optsFactory(storage, &maxInflight, &inflightQueries, 0) + + // Configure concurrency settings. + opts.ConcurrentEvalsEnabled = true + opts.MaxConcurrentEvals = int64(ruleCount) * 2 + opts.RuleConcurrencyController = nil + ruleManager := NewManager(opts) + + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_chain.yaml"}...) + require.Empty(t, errs) + require.Len(t, groups, 1) + var group *Group + for _, g := range groups { + group = g + } + + start := time.Now() + + // Expected evaluation order + order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group) + require.Equal(t, []ConcurrentRules{ + {0, 1}, + {2}, + {3}, + {4, 5, 6}, + }, order) + + group.Eval(ctx, start) + + require.EqualValues(t, 3, maxInflight.Load()) + // Some rules should execute concurrently so should complete quicker. + require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds()) + // Each rule produces one vector. 
+ require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples)) + }) } func TestBoundedRuleEvalConcurrency(t *testing.T) { From 1a27ab29b8a33592e50daef4c59f6fc7b9a5b1f9 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Mon, 6 Jan 2025 15:48:38 -0500 Subject: [PATCH 1199/1397] Rules: Store dependencies instead of boolean (#15689) * Rules: Store dependencies instead of boolean To improve https://github.com/prometheus/prometheus/pull/15681 further, we'll need to store the dependencies and dependents of each Right now, if a rule has both (at least 1) dependents and dependencies, it is not possible to determine the order to run the rules and they must all run sequentially This PR only changes the dependents and dependencies attributes of rules, it does not implement a new topological sort algorithm Signed-off-by: Julien Duchesne * Store a slice of Rule instead Signed-off-by: Julien Duchesne * Add `BenchmarkRuleDependencyController_AnalyseRules` for future reference Signed-off-by: Julien Duchesne --------- Signed-off-by: Julien Duchesne --- rules/alerting.go | 53 ++++++++++++++++++++++++++++++++-------- rules/alerting_test.go | 20 ++++++++++----- rules/group.go | 26 +++++++++----------- rules/manager.go | 8 +++--- rules/manager_test.go | 35 +++++++++++++++++++++----- rules/origin_test.go | 14 ++++++----- rules/recording.go | 54 +++++++++++++++++++++++++++++++++-------- rules/recording_test.go | 20 ++++++++++----- rules/rule.go | 14 ++++++++--- 9 files changed, 178 insertions(+), 66 deletions(-) diff --git a/rules/alerting.go b/rules/alerting.go index e7f15baefe..77d53395e0 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -143,8 +143,9 @@ type AlertingRule struct { logger *slog.Logger - noDependentRules *atomic.Bool - noDependencyRules *atomic.Bool + dependenciesMutex sync.RWMutex + dependentRules []Rule + dependencyRules []Rule } // NewAlertingRule constructs a new AlertingRule. 
@@ -171,8 +172,6 @@ func NewAlertingRule( evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } } @@ -316,20 +315,54 @@ func (r *AlertingRule) Restored() bool { return r.restored.Load() } -func (r *AlertingRule) SetNoDependentRules(noDependentRules bool) { - r.noDependentRules.Store(noDependentRules) +func (r *AlertingRule) SetDependentRules(dependents []Rule) { + r.dependenciesMutex.Lock() + defer r.dependenciesMutex.Unlock() + + r.dependentRules = make([]Rule, len(dependents)) + copy(r.dependentRules, dependents) } func (r *AlertingRule) NoDependentRules() bool { - return r.noDependentRules.Load() + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + + if r.dependentRules == nil { + return false // We don't know if there are dependent rules. + } + + return len(r.dependentRules) == 0 } -func (r *AlertingRule) SetNoDependencyRules(noDependencyRules bool) { - r.noDependencyRules.Store(noDependencyRules) +func (r *AlertingRule) DependentRules() []Rule { + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + return r.dependentRules +} + +func (r *AlertingRule) SetDependencyRules(dependencies []Rule) { + r.dependenciesMutex.Lock() + defer r.dependenciesMutex.Unlock() + + r.dependencyRules = make([]Rule, len(dependencies)) + copy(r.dependencyRules, dependencies) } func (r *AlertingRule) NoDependencyRules() bool { - return r.noDependencyRules.Load() + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + + if r.dependencyRules == nil { + return false // We don't know if there are dependency rules. 
+ } + + return len(r.dependencyRules) == 0 +} + +func (r *AlertingRule) DependencyRules() []Rule { + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + return r.dependencyRules } // resolvedRetention is the duration for which a resolved alert instance diff --git a/rules/alerting_test.go b/rules/alerting_test.go index f0aa339cc7..f7bdf4a955 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -998,7 +998,9 @@ func TestAlertingEvalWithOrigin(t *testing.T) { require.Equal(t, detail, NewRuleDetail(rule)) } -func TestAlertingRule_SetNoDependentRules(t *testing.T) { +func TestAlertingRule_SetDependentRules(t *testing.T) { + dependentRule := NewRecordingRule("test1", nil, labels.EmptyLabels()) + rule := NewAlertingRule( "test", &parser.NumberLiteral{Val: 1}, @@ -1012,14 +1014,18 @@ func TestAlertingRule_SetNoDependentRules(t *testing.T) { ) require.False(t, rule.NoDependentRules()) - rule.SetNoDependentRules(false) + rule.SetDependentRules([]Rule{dependentRule}) require.False(t, rule.NoDependentRules()) + require.Equal(t, []Rule{dependentRule}, rule.DependentRules()) - rule.SetNoDependentRules(true) + rule.SetDependentRules([]Rule{}) require.True(t, rule.NoDependentRules()) + require.Empty(t, rule.DependentRules()) } -func TestAlertingRule_SetNoDependencyRules(t *testing.T) { +func TestAlertingRule_SetDependencyRules(t *testing.T) { + dependencyRule := NewRecordingRule("test1", nil, labels.EmptyLabels()) + rule := NewAlertingRule( "test", &parser.NumberLiteral{Val: 1}, @@ -1033,11 +1039,13 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) { ) require.False(t, rule.NoDependencyRules()) - rule.SetNoDependencyRules(false) + rule.SetDependencyRules([]Rule{dependencyRule}) require.False(t, rule.NoDependencyRules()) + require.Equal(t, []Rule{dependencyRule}, rule.DependencyRules()) - rule.SetNoDependencyRules(true) + rule.SetDependencyRules([]Rule{}) require.True(t, rule.NoDependencyRules()) + require.Empty(t, rule.DependencyRules()) } 
func TestAlertingRule_ActiveAlertsCount(t *testing.T) { diff --git a/rules/group.go b/rules/group.go index cabb45abbb..724b926d4f 100644 --- a/rules/group.go +++ b/rules/group.go @@ -1034,27 +1034,25 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { // output metric produced by another rule in its expression (i.e. as its "input"). type dependencyMap map[Rule][]Rule -// dependents returns the count of rules which use the output of the given rule as one of their inputs. -func (m dependencyMap) dependents(r Rule) int { - return len(m[r]) +// dependents returns the rules which use the output of the given rule as one of their inputs. +func (m dependencyMap) dependents(r Rule) []Rule { + return m[r] } -// dependencies returns the count of rules on which the given rule is dependent for input. -func (m dependencyMap) dependencies(r Rule) int { +// dependencies returns the rules on which the given rule is dependent for input. +func (m dependencyMap) dependencies(r Rule) []Rule { if len(m) == 0 { - return 0 + return []Rule{} } - var count int - for _, children := range m { - for _, child := range children { - if child == r { - count++ - } + var dependencies []Rule + for rule, dependents := range m { + if slices.Contains(dependents, r) { + dependencies = append(dependencies, rule) } } - return count + return dependencies } // isIndependent determines whether the given rule is not dependent on another rule for its input, nor is any other rule @@ -1064,7 +1062,7 @@ func (m dependencyMap) isIndependent(r Rule) bool { return false } - return m.dependents(r)+m.dependencies(r) == 0 + return len(m.dependents(r)) == 0 && len(m.dependencies(r)) == 0 } // buildDependencyMap builds a data-structure which contains the relationships between rules within a group. 
diff --git a/rules/manager.go b/rules/manager.go index 390742ce50..c4c0f8a1ef 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -444,8 +444,8 @@ func SendAlerts(s Sender, externalURL string) NotifyFunc { // RuleDependencyController controls whether a set of rules have dependencies between each other. type RuleDependencyController interface { // AnalyseRules analyses dependencies between the input rules. For each rule that it's guaranteed - // not having any dependants and/or dependency, this function should call Rule.SetNoDependentRules(true) - // and/or Rule.SetNoDependencyRules(true). + // not having any dependants and/or dependency, this function should call Rule.SetDependentRules(...) + // and/or Rule.SetDependencyRules(...). AnalyseRules(rules []Rule) } @@ -460,8 +460,8 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) { } for _, r := range rules { - r.SetNoDependentRules(depMap.dependents(r) == 0) - r.SetNoDependencyRules(depMap.dependencies(r) == 0) + r.SetDependentRules(depMap.dependents(r)) + r.SetDependencyRules(depMap.dependencies(r)) } } diff --git a/rules/manager_test.go b/rules/manager_test.go index df6f5fd1b4..defa93a68c 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1423,8 +1423,6 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } group := NewGroup(GroupOptions{ @@ -1613,11 +1611,12 @@ func TestDependencyMap(t *testing.T) { depMap := buildDependencyMap(group.rules) require.Zero(t, depMap.dependencies(rule)) - require.Equal(t, 2, depMap.dependents(rule)) + require.Equal(t, []Rule{rule2, rule4}, depMap.dependents(rule)) + require.Len(t, depMap.dependents(rule), 2) require.False(t, depMap.isIndependent(rule)) require.Zero(t, depMap.dependents(rule2)) - require.Equal(t, 1, 
depMap.dependencies(rule2)) + require.Equal(t, []Rule{rule}, depMap.dependencies(rule2)) require.False(t, depMap.isIndependent(rule2)) require.Zero(t, depMap.dependents(rule3)) @@ -1625,7 +1624,7 @@ func TestDependencyMap(t *testing.T) { require.True(t, depMap.isIndependent(rule3)) require.Zero(t, depMap.dependents(rule4)) - require.Equal(t, 1, depMap.dependencies(rule4)) + require.Equal(t, []Rule{rule}, depMap.dependencies(rule4)) require.False(t, depMap.isIndependent(rule4)) } @@ -1958,7 +1957,8 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) { require.NotEqual(t, orig[h], depMap) // We expect there to be some dependencies since the new rule group contains a dependency. require.NotEmpty(t, depMap) - require.Equal(t, 1, depMap.dependents(rr)) + require.Len(t, depMap.dependents(rr), 1) + require.Equal(t, "HighRequestRate", depMap.dependents(rr)[0].Name()) require.Zero(t, depMap.dependencies(rr)) } } @@ -2508,3 +2508,26 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) { }) } } + +func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) { + storage := teststorage.New(b) + b.Cleanup(func() { storage.Close() }) + + ruleManager := NewManager(&ManagerOptions{ + Context: context.Background(), + Logger: promslog.NewNopLogger(), + Appendable: storage, + QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, + }) + + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, "fixtures/rules_multiple.yaml") + require.Empty(b, errs) + require.Len(b, groups, 1) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, g := range groups { + ruleManager.opts.RuleDependencyController.AnalyseRules(g.rules) + } + } +} diff --git a/rules/origin_test.go b/rules/origin_test.go index 0bf428f3c1..b38f5d99b2 100644 --- a/rules/origin_test.go +++ b/rules/origin_test.go @@ -45,10 +45,12 @@ func (u unknownRule) SetEvaluationDuration(time.Duration) {} func (u unknownRule) 
GetEvaluationDuration() time.Duration { return 0 } func (u unknownRule) SetEvaluationTimestamp(time.Time) {} func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} } -func (u unknownRule) SetNoDependentRules(bool) {} +func (u unknownRule) SetDependentRules([]Rule) {} func (u unknownRule) NoDependentRules() bool { return false } -func (u unknownRule) SetNoDependencyRules(bool) {} +func (u unknownRule) DependentRules() []Rule { return nil } +func (u unknownRule) SetDependencyRules([]Rule) {} func (u unknownRule) NoDependencyRules() bool { return false } +func (u unknownRule) DependencyRules() []Rule { return nil } func TestNewRuleDetailPanics(t *testing.T) { require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() { @@ -76,12 +78,12 @@ func TestNewRuleDetail(t *testing.T) { require.False(t, detail.NoDependentRules) require.False(t, detail.NoDependencyRules) - rule.SetNoDependentRules(true) + rule.SetDependentRules([]Rule{}) detail = NewRuleDetail(rule) require.True(t, detail.NoDependentRules) require.False(t, detail.NoDependencyRules) - rule.SetNoDependencyRules(true) + rule.SetDependencyRules([]Rule{}) detail = NewRuleDetail(rule) require.True(t, detail.NoDependentRules) require.True(t, detail.NoDependencyRules) @@ -104,12 +106,12 @@ func TestNewRuleDetail(t *testing.T) { require.False(t, detail.NoDependentRules) require.False(t, detail.NoDependencyRules) - rule.SetNoDependentRules(true) + rule.SetDependentRules([]Rule{}) detail = NewRuleDetail(rule) require.True(t, detail.NoDependentRules) require.False(t, detail.NoDependencyRules) - rule.SetNoDependencyRules(true) + rule.SetDependencyRules([]Rule{}) detail = NewRuleDetail(rule) require.True(t, detail.NoDependentRules) require.True(t, detail.NoDependencyRules) diff --git a/rules/recording.go b/rules/recording.go index 52c2a875ab..3b6db210af 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net/url" + "sync" "time" 
"go.uber.org/atomic" @@ -43,8 +44,9 @@ type RecordingRule struct { // Duration of how long it took to evaluate the recording rule. evaluationDuration *atomic.Duration - noDependentRules *atomic.Bool - noDependencyRules *atomic.Bool + dependenciesMutex sync.RWMutex + dependentRules []Rule + dependencyRules []Rule } // NewRecordingRule returns a new recording rule. @@ -57,8 +59,6 @@ func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *Reco evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } } @@ -172,18 +172,52 @@ func (rule *RecordingRule) GetEvaluationTimestamp() time.Time { return rule.evaluationTimestamp.Load() } -func (rule *RecordingRule) SetNoDependentRules(noDependentRules bool) { - rule.noDependentRules.Store(noDependentRules) +func (rule *RecordingRule) SetDependentRules(dependents []Rule) { + rule.dependenciesMutex.Lock() + defer rule.dependenciesMutex.Unlock() + + rule.dependentRules = make([]Rule, len(dependents)) + copy(rule.dependentRules, dependents) } func (rule *RecordingRule) NoDependentRules() bool { - return rule.noDependentRules.Load() + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + + if rule.dependentRules == nil { + return false // We don't know if there are dependent rules. 
+ } + + return len(rule.dependentRules) == 0 } -func (rule *RecordingRule) SetNoDependencyRules(noDependencyRules bool) { - rule.noDependencyRules.Store(noDependencyRules) +func (rule *RecordingRule) DependentRules() []Rule { + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + return rule.dependentRules +} + +func (rule *RecordingRule) SetDependencyRules(dependencies []Rule) { + rule.dependenciesMutex.Lock() + defer rule.dependenciesMutex.Unlock() + + rule.dependencyRules = make([]Rule, len(dependencies)) + copy(rule.dependencyRules, dependencies) } func (rule *RecordingRule) NoDependencyRules() bool { - return rule.noDependencyRules.Load() + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + + if rule.dependencyRules == nil { + return false // We don't know if there are dependency rules. + } + + return len(rule.dependencyRules) == 0 +} + +func (rule *RecordingRule) DependencyRules() []Rule { + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + return rule.dependencyRules } diff --git a/rules/recording_test.go b/rules/recording_test.go index 72c0764f9b..3fbf11c435 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -255,24 +255,32 @@ func TestRecordingEvalWithOrigin(t *testing.T) { require.Equal(t, detail, NewRuleDetail(rule)) } -func TestRecordingRule_SetNoDependentRules(t *testing.T) { +func TestRecordingRule_SetDependentRules(t *testing.T) { + dependentRule := NewRecordingRule("test1", nil, labels.EmptyLabels()) + rule := NewRecordingRule("1", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels()) require.False(t, rule.NoDependentRules()) - rule.SetNoDependentRules(false) + rule.SetDependentRules([]Rule{dependentRule}) require.False(t, rule.NoDependentRules()) + require.Equal(t, []Rule{dependentRule}, rule.DependentRules()) - rule.SetNoDependentRules(true) + rule.SetDependentRules([]Rule{}) require.True(t, rule.NoDependentRules()) + require.Empty(t, rule.DependentRules()) } 
-func TestRecordingRule_SetNoDependencyRules(t *testing.T) { +func TestRecordingRule_SetDependencyRules(t *testing.T) { + dependencyRule := NewRecordingRule("test1", nil, labels.EmptyLabels()) + rule := NewRecordingRule("1", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels()) require.False(t, rule.NoDependencyRules()) - rule.SetNoDependencyRules(false) + rule.SetDependencyRules([]Rule{dependencyRule}) require.False(t, rule.NoDependencyRules()) + require.Equal(t, []Rule{dependencyRule}, rule.DependencyRules()) - rule.SetNoDependencyRules(true) + rule.SetDependencyRules([]Rule{}) require.True(t, rule.NoDependencyRules()) + require.Empty(t, rule.DependencyRules()) } diff --git a/rules/rule.go b/rules/rule.go index 687c03d000..33f1755ac5 100644 --- a/rules/rule.go +++ b/rules/rule.go @@ -62,19 +62,25 @@ type Rule interface { // NOTE: Used dynamically by rules.html template. GetEvaluationTimestamp() time.Time - // SetNoDependentRules sets whether there's no other rule in the rule group that depends on this rule. - SetNoDependentRules(bool) + // SetDependentRules sets rules which depend on the output of this rule. + SetDependentRules(rules []Rule) // NoDependentRules returns true if it's guaranteed that in the rule group there's no other rule // which depends on this one. In case this function returns false there's no such guarantee, which // means there may or may not be other rules depending on this one. NoDependentRules() bool - // SetNoDependencyRules sets whether this rule doesn't depend on the output of any rule in the rule group. - SetNoDependencyRules(bool) + // DependentRules returns the rules which depend on the output of this rule. + DependentRules() []Rule + + // SetDependencyRules sets rules on which this rule depends. + SetDependencyRules(rules []Rule) // NoDependencyRules returns true if it's guaranteed that this rule doesn't depend on the output of // any other rule in the group. 
In case this function returns false there's no such guarantee, which // means the rule may or may not depend on other rules. NoDependencyRules() bool + + // DependencyRules returns the rules on which this rule depends. + DependencyRules() []Rule } From 73a3438c1b6da19adcb099b62ae5376dee95b352 Mon Sep 17 00:00:00 2001 From: sujal shah Date: Sun, 5 Jan 2025 23:45:47 +0530 Subject: [PATCH 1200/1397] api: Add two new fields Node and ServerTime. This commit introduced two field in `/status` endpoint: - The node currently serving the request. - The current server time for debugging time drift issues. fixes #15394. Signed-off-by: sujal shah --- docs/querying/api.md | 2 ++ web/api/v1/api.go | 2 ++ web/ui/mantine-ui/src/pages/StatusPage.tsx | 6 ++++++ web/web.go | 7 +++++++ 4 files changed, 17 insertions(+) diff --git a/docs/querying/api.md b/docs/querying/api.md index 87de463288..f1e7129303 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1158,6 +1158,8 @@ $ curl http://localhost:9090/api/v1/status/runtimeinfo "data": { "startTime": "2019-11-02T17:23:59.301361365+01:00", "CWD": "/", + "hostname" : "DESKTOP-717H17Q", + "serverTime": "2025-01-05T18:27:33Z", "reloadConfigSuccess": true, "lastConfigTime": "2019-11-02T17:23:59+01:00", "timeSeriesCount": 873, diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 392dfc6aab..49112c7888 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -144,6 +144,8 @@ type PrometheusVersion struct { type RuntimeInfo struct { StartTime time.Time `json:"startTime"` CWD string `json:"CWD"` + Hostname string `json:"hostname"` + ServerTime time.Time `json:"serverTime"` ReloadConfigSuccess bool `json:"reloadConfigSuccess"` LastConfigTime time.Time `json:"lastConfigTime"` CorruptionCount int64 `json:"corruptionCount"` diff --git a/web/ui/mantine-ui/src/pages/StatusPage.tsx b/web/ui/mantine-ui/src/pages/StatusPage.tsx index 71dc476a2d..c968f1e866 100644 --- a/web/ui/mantine-ui/src/pages/StatusPage.tsx +++ 
b/web/ui/mantine-ui/src/pages/StatusPage.tsx @@ -29,6 +29,12 @@ export default function StatusPage() { formatTimestamp(new Date(v as string).valueOf() / 1000, useLocalTime), }, CWD: { title: "Working directory" }, + hostname: { title: "Hostname" }, + serverTime: { + title: "Server Time", + formatValue: (v: string | boolean) => + formatTimestamp(new Date(v as string).valueOf() / 1000, useLocalTime), + }, reloadConfigSuccess: { title: "Configuration reload", formatValue: (v: string | boolean) => (v ? "Successful" : "Unsuccessful"), diff --git a/web/web.go b/web/web.go index 08c683bae8..9ce66b7ff5 100644 --- a/web/web.go +++ b/web/web.go @@ -804,6 +804,13 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { GODEBUG: os.Getenv("GODEBUG"), } + hostname, err := os.Hostname() + if err != nil { + return status, fmt.Errorf("Error getting hostname: %w", err) + } + status.Hostname = hostname + status.ServerTime = time.Now().UTC() + if h.options.TSDBRetentionDuration != 0 { status.StorageRetention = h.options.TSDBRetentionDuration.String() } From df55e536b801b8512ce425501791bf3b6727a4a5 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 7 Jan 2025 17:51:57 +0100 Subject: [PATCH 1201/1397] docs: fix spelling Signed-off-by: beorn7 --- docs/http_sd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/http_sd.md b/docs/http_sd.md index 884deb9f3c..aadc488738 100644 --- a/docs/http_sd.md +++ b/docs/http_sd.md @@ -8,7 +8,7 @@ sort_rank: 7 Prometheus provides a generic [HTTP Service Discovery](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config), that enables it to discover targets over an HTTP endpoint. 
-The HTTP Service Discovery is complimentary to the supported service +The HTTP Service Discovery is complementary to the supported service discovery mechanisms, and is an alternative to [File-based Service Discovery](https://prometheus.io/docs/guides/file-sd/#use-file-based-service-discovery-to-discover-scrape-targets). ## Comparison between File-Based SD and HTTP SD From 919a5b657e1d6a23d6ba6c853ee63da5dbc352d2 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Tue, 7 Jan 2025 08:58:26 -0800 Subject: [PATCH 1202/1397] Expose ListPostings Length via Len() method (#15678) tsdb: expose remaining ListPostings Length Signed-off-by: Ben Ye --------- Signed-off-by: Ben Ye --- tsdb/index/postings.go | 5 +++++ tsdb/index/postings_test.go | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 03e3f7a239..e3ba5d64b4 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -863,6 +863,11 @@ func (it *ListPostings) Err() error { return nil } +// Len returns the remaining number of postings in the list. +func (it *ListPostings) Len() int { + return len(it.list) +} + // bigEndianPostings implements the Postings interface over a byte stream of // big endian numbers. 
type bigEndianPostings struct { diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index cf5ab6c0f8..feaba90e52 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -1244,63 +1244,78 @@ func TestPostingsWithIndexHeap(t *testing.T) { func TestListPostings(t *testing.T) { t.Run("empty list", func(t *testing.T) { p := NewListPostings(nil) + require.Equal(t, 0, p.(*ListPostings).Len()) require.False(t, p.Next()) require.False(t, p.Seek(10)) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("one posting", func(t *testing.T) { t.Run("next", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10}) + require.Equal(t, 1, p.(*ListPostings).Len()) require.True(t, p.Next()) require.Equal(t, storage.SeriesRef(10), p.At()) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek less", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10}) + require.Equal(t, 1, p.(*ListPostings).Len()) require.True(t, p.Seek(5)) require.Equal(t, storage.SeriesRef(10), p.At()) require.True(t, p.Seek(5)) require.Equal(t, storage.SeriesRef(10), p.At()) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek equal", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10}) + require.Equal(t, 1, p.(*ListPostings).Len()) require.True(t, p.Seek(10)) require.Equal(t, storage.SeriesRef(10), p.At()) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek more", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10}) + require.Equal(t, 1, p.(*ListPostings).Len()) require.False(t, p.Seek(15)) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek after next", func(t *testing.T) { p := 
NewListPostings([]storage.SeriesRef{10}) + require.Equal(t, 1, p.(*ListPostings).Len()) require.True(t, p.Next()) require.False(t, p.Seek(15)) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) }) t.Run("multiple postings", func(t *testing.T) { t.Run("next", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10, 20}) + require.Equal(t, 2, p.(*ListPostings).Len()) require.True(t, p.Next()) require.Equal(t, storage.SeriesRef(10), p.At()) require.True(t, p.Next()) require.Equal(t, storage.SeriesRef(20), p.At()) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10, 20}) + require.Equal(t, 2, p.(*ListPostings).Len()) require.True(t, p.Seek(5)) require.Equal(t, storage.SeriesRef(10), p.At()) require.True(t, p.Seek(5)) @@ -1315,23 +1330,30 @@ func TestListPostings(t *testing.T) { require.Equal(t, storage.SeriesRef(20), p.At()) require.False(t, p.Next()) require.NoError(t, p.Err()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek lest than last", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50}) + require.Equal(t, 5, p.(*ListPostings).Len()) require.True(t, p.Seek(45)) require.Equal(t, storage.SeriesRef(50), p.At()) require.False(t, p.Next()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek exactly last", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50}) + require.Equal(t, 5, p.(*ListPostings).Len()) require.True(t, p.Seek(50)) require.Equal(t, storage.SeriesRef(50), p.At()) require.False(t, p.Next()) + require.Equal(t, 0, p.(*ListPostings).Len()) }) t.Run("seek more than last", func(t *testing.T) { p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50}) + require.Equal(t, 5, p.(*ListPostings).Len()) require.False(t, p.Seek(60)) require.False(t, p.Next()) + require.Equal(t, 0, 
p.(*ListPostings).Len()) }) }) From 7687661453fbf9ba3e9cfeb1f16e5c4ac66b8fae Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 8 Jan 2025 12:55:27 +0100 Subject: [PATCH 1203/1397] promqltest: make eval_ordered ignore annotations Besides making eval_ordered ignore annotations, this does the following: - Adds a test to verify that eval_ordered indeed ignores an info annotations now, while eval complains about it, eval_info recognizes it and, eval_warn flags the missing of the warn annotation. - Refactors the annotation check into its own method. - Moves closing of the query to the appropriate place where it wasn't so far. Signed-off-by: beorn7 --- promql/promqltest/test.go | 47 ++++++++++++++++++---------------- promql/promqltest/test_test.go | 38 +++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 22 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index efa2136f10..518164827a 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/almost" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/convertnhcb" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -692,6 +693,24 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc ev.expected[h] = entry{pos: pos, vals: vals} } +// checkAnnotations asserts if the annotations match the expectations. +func (ev *evalCmd) checkAnnotations(expr string, annos annotations.Annotations) error { + countWarnings, countInfo := annos.CountWarningsAndInfo() + switch { + case ev.ordered: + // Ignore annotations if testing for order. 
+ case !ev.warn && countWarnings > 0: + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors()) + case ev.warn && countWarnings == 0: + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", expr, ev.line) + case !ev.info && countInfo > 0: + return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors()) + case ev.info && countInfo == 0: + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", expr, ev.line) + } + return nil +} + // compareResult compares the result value with the defined expectation. func (ev *evalCmd) compareResult(result parser.Value) error { switch val := result.(type) { @@ -1131,6 +1150,7 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if err != nil { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } + defer q.Close() res := q.Exec(t.context) if res.Err != nil { if cmd.fail { @@ -1142,18 +1162,9 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } - countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() - switch { - case !cmd.warn && countWarnings > 0: - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - case cmd.warn && countWarnings == 0: - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) - case !cmd.info && countInfo > 0: - return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - case cmd.info && countInfo == 0: - return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + if err := 
cmd.checkAnnotations(cmd.expr, res.Warnings); err != nil { + return err } - defer q.Close() if err := cmd.compareResult(res.Value); err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err) @@ -1196,16 +1207,8 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() - switch { - case !cmd.warn && countWarnings > 0: - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - case cmd.warn && countWarnings == 0: - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) - case !cmd.info && countInfo > 0: - return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - case cmd.info && countInfo == 0: - return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + if err := cmd.checkAnnotations(iq.expr, res.Warnings); err != nil { + return err } err = cmd.compareResult(res.Value) if err != nil { @@ -1218,11 +1221,11 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if err != nil { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } + defer q.Close() rangeRes := q.Exec(t.context) if rangeRes.Err != nil { return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err) } - defer q.Close() if cmd.ordered { // Range queries are always sorted by labels, so skip this test case that expects results in a particular order. 
return nil diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 327dcd78fe..96499e869d 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -353,6 +353,44 @@ eval_ordered instant at 50m sort(http_requests) `, expectedError: `error in eval sort(http_requests) (line 10): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`, }, + "instant query with results expected to match provided order, result is in expected order and info annotation is ignored": { + input: testData + ` +eval_ordered instant at 50m sort(rate(http_requests[10m])) + {group="production", instance="0", job="api-server"} 0.03333333333333333 + {group="production", instance="1", job="api-server"} 0.06666666666666667 + {group="canary", instance="0", job="api-server"} 0.1 + {group="canary", instance="1", job="api-server"} 0.13333333333333333 +`, + }, + "instant query with expected info annotation": { + input: testData + ` +eval_info instant at 50m sort(rate(http_requests[10m])) + {group="production", instance="0", job="api-server"} 0.03333333333333333 + {group="production", instance="1", job="api-server"} 0.06666666666666667 + {group="canary", instance="0", job="api-server"} 0.1 + {group="canary", instance="1", job="api-server"} 0.13333333333333333 +`, + }, + "instant query with unexpected info annotation": { + input: testData + ` +eval instant at 50m sort(rate(http_requests[10m])) + {group="production", instance="0", job="api-server"} 0.03333333333333333 + {group="production", instance="1", job="api-server"} 0.06666666666666667 + {group="canary", instance="0", job="api-server"} 0.1 + {group="canary", instance="1", job="api-server"} 0.13333333333333333 +`, + expectedError: `unexpected info annotations evaluating query "sort(rate(http_requests[10m]))" (line 10): [PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "http_requests"]`, + }, + 
"instant query with unexpectedly missing warn annotation": { + input: testData + ` +eval_warn instant at 50m sort(rate(http_requests[10m])) + {group="production", instance="0", job="api-server"} 0.03333333333333333 + {group="production", instance="1", job="api-server"} 0.06666666666666667 + {group="canary", instance="0", job="api-server"} 0.1 + {group="canary", instance="1", job="api-server"} 0.13333333333333333 +`, + expectedError: `expected warnings evaluating query "sort(rate(http_requests[10m]))" (line 10) but got none`, + }, "instant query with invalid timestamp": { input: `eval instant at abc123 vector(0)`, expectedError: `error in eval vector(0) (line 1): invalid timestamp definition "abc123": not a valid duration string: "abc123"`, From d9a80a91e3c7d8f98e72924b55bc6da3c4311b51 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 8 Jan 2025 13:57:13 +0100 Subject: [PATCH 1204/1397] docs: Document eval_warn and eval_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This also improves the documentation in the following ways: - Clarifies that `eval` requires no annotations. - Clarifies that `eval_ordered` ignores annotations. - Clarifies that `eval_ordered` does not work with matrix returns (which could very well be created by instant queries). - Clarifies that there are more `eval` commands than just `eval`. - Improves wording for `eval_ordered`. - Replaces `...` by the typographical correct `…`. - Fixes a numerical error in an example. Signed-off-by: beorn7 --- promql/promqltest/README.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md index af34354241..25c2653ab3 100644 --- a/promql/promqltest/README.md +++ b/promql/promqltest/README.md @@ -22,7 +22,7 @@ Each test file contains a series of commands. 
There are three kinds of commands: * `load` * `clear` -* `eval` +* `eval` (including the variants `eval_fail`, `eval_warn`, `eval_info`, and `eval_ordered`) Each command is executed in the order given in the file. @@ -50,12 +50,12 @@ load 1m my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:3 count:22 buckets:[5 10 7]}} ``` -...will create a single series with labels `my_metric{env="prod"}`, with the following points: +… will create a single series with labels `my_metric{env="prod"}`, with the following points: * t=0: value is 5 * t=1m: value is 2 * t=2m: value is 5 -* t=3m: value is 7 +* t=3m: value is 8 * t=4m: no point * t=5m: stale marker * t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7 @@ -74,6 +74,7 @@ When loading a batch of classic histogram float series, you can optionally appen ## `eval` command `eval` runs a query against the test environment and asserts that the result is as expected. +It requires the query to succeed without any (info or warn) annotations. Both instant and range queries are supported. @@ -110,11 +111,18 @@ eval range from 0 to 3m step 1m sum by (env) (my_metric) {env="test"} 10 20 30 45 ``` -Instant queries also support asserting that the series are returned in exactly the order specified: use `eval_ordered instant ...` instead of `eval instant ...`. -This is not supported for range queries. +To assert that a query succeeds with an info or warn annotation, use the +`eval_info` or `eval_warn` commands, respectively. -It is also possible to test that queries fail: use `eval_fail instant ...` or `eval_fail range ...`. -`eval_fail` optionally takes an expected error message string or regexp to assert that the error message is as expected. +Instant queries also support asserting that the series are returned in exactly +the order specified: use `eval_ordered instant ...` instead of `eval instant +...`. `eval_ordered` ignores any annotations. The assertion always fails for +matrix results. 
+ +To assert that a query fails, use the `eval_fail` command. `eval_fail` does not +expect any result lines. Instead, it optionally accepts an expected error +message string or regular expression to assert that the error message is as +expected. For example: From a768a3b95e65efd0338cb2db598191ecd117f362 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Wed, 8 Jan 2025 11:32:48 -0500 Subject: [PATCH 1205/1397] Rule Concurrency: Test safe abort of rule evaluations (#15797) This test was added in the Grafana fork a while ago: https://github.com/grafana/mimir-prometheus/pull/714 and has been helpful to make sure we can safely terminate rule evaluations early The new rule evaluation logic (done here: https://github.com/prometheus/prometheus/pull/15681) does not have the bug, but the test was useful to verify that Signed-off-by: Julien Duchesne --- rules/manager_test.go | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/rules/manager_test.go b/rules/manager_test.go index defa93a68c..5c3fcc96a8 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -2326,6 +2326,41 @@ func TestUpdateWhenStopped(t *testing.T) { require.NoError(t, err) } +func TestGroup_Eval_RaceConditionOnStoppingGroupEvaluationWhileRulesAreEvaluatedConcurrently(t *testing.T) { + storage := teststorage.New(t) + t.Cleanup(func() { storage.Close() }) + + var ( + inflightQueries atomic.Int32 + maxInflight atomic.Int32 + maxConcurrency int64 = 10 + ) + + files := []string{"fixtures/rules_multiple_groups.yaml"} + files2 := []string{"fixtures/rules.yaml"} + + ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, maxConcurrency)) + go func() { + ruleManager.Run() + }() + <-ruleManager.block + + // Update the group a decent number of times to simulate start and stopping in the middle of an evaluation. 
+ for i := 0; i < 10; i++ { + err := ruleManager.Update(time.Second, files, labels.EmptyLabels(), "", nil) + require.NoError(t, err) + + // Wait half of the query execution duration and then change the rule groups loaded by the manager + // so that the previous rule group will be interrupted while the query is executing. + time.Sleep(artificialDelay / 2) + + err = ruleManager.Update(time.Second, files2, labels.EmptyLabels(), "", nil) + require.NoError(t, err) + } + + ruleManager.Stop() +} + const artificialDelay = 250 * time.Millisecond func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions { From 9d6f88cb7300eb8c006942f8ce2560eca4e553c7 Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Thu, 9 Jan 2025 09:29:57 +0000 Subject: [PATCH 1206/1397] Add additional tests for operators over incompatible nhcb (#15787) * Add additional tests for operators over incompatible nhcb Signed-off-by: Fiona Liao --- .../testdata/native_histograms.test | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 6be298cf7d..414619d5cd 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1128,6 +1128,39 @@ eval_warn range from 0 to 12m step 6m sum(metric) eval_warn range from 0 to 12m step 6m avg(metric) {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ +# Test incompatible schemas with additional aggregation operators +eval range from 0 to 12m step 6m count(metric) + {} 2 2 3 + +eval range from 0 to 12m step 6m group(metric) + {} 1 1 1 + +eval range from 0 to 12m step 6m count(limitk(1, metric)) + {} 1 1 1 + +eval range from 0 to 12m step 6m limitk(3, metric) + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + 
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 12m step 6m limit_ratio(1, metric) + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# Test incompatible schemas with and/or +eval range from 0 to 12m step 6m metric{series="1"} and ignoring(series) metric{series="2"} + metric{series="1"} _ _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{series="2"} + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _ + +# Test incompatible schemas with arithmetic binary operators +eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} + +eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} + clear load 1m From b3e30d52cecd2cb6b34c9ca74633fa6c13da0d24 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Thu, 9 Jan 2025 21:08:42 +0530 Subject: [PATCH 1207/1397] [BUGFIX] PromQL: Fix `` functions with histograms (#15711) fix 
aggr_over_time with histograms Signed-off-by: Neeraj Gartia --------- Signed-off-by: Neeraj Gartia --- promql/functions.go | 75 +++++++++++++--------- promql/promqltest/testdata/functions.test | 78 +++++++++++++++++++---- 2 files changed, 111 insertions(+), 42 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 5f31a3db18..2d809571d4 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -691,9 +691,15 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { @@ -705,18 +711,20 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode values = append(values, Sample{F: math.Abs(f.F - median)}) } return quantile(0.5, values) - }), nil + }), annos } // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. max_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { maxVal := s.Floats[0].F for _, f := range s.Floats { @@ -725,18 +733,20 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return maxVal - }), nil + }), annos } // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. min_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { minVal := s.Floats[0].F for _, f := range s.Floats { @@ -745,7 +755,7 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return minVal - }), nil + }), annos } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -794,10 +804,6 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. quantile_over_time ignores histograms for now. 
If - // there are only histograms, we have to return without adding - // anything to enh.Out. return enh.Out, nil } @@ -805,7 +811,10 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva if math.IsNaN(q) || q < 0 || q > 1 { annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } - + if len(el.Histograms) > 0 { + metricName := el.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo(metricName, args[0].PositionRange())) + } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { values = append(values, Sample{F: f.F}) @@ -815,13 +824,15 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. stddev_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 @@ -833,18 +844,20 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }), nil + }), annos } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. stdvar_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 @@ -856,7 +869,7 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count - }), nil + }), annos } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 6d2ade3abc..7fc636450f 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -929,35 +929,58 @@ eval instant at 1m avg_over_time(metric[2m]) # Tests for stddev_over_time and stdvar_over_time. clear load 10s - metric 0 8 8 2 3 + metric 0 8 8 2 3 + metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5 + metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} eval instant at 1m stdvar_over_time(metric[2m]) - {} 10.56 + {} 10.56 eval instant at 1m stddev_over_time(metric[2m]) - {} 3.249615 + {} 3.249615 eval instant at 1m stddev_over_time((metric[2m])) - {} 3.249615 + {} 3.249615 + +# Tests for stddev_over_time and stdvar_over_time with histograms. +eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) + {type="mix"} 0 + +eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) + {type="mix"} 0 # Tests for stddev_over_time and stdvar_over_time #4927. 
clear load 10s - metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 + metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 eval instant at 1m stdvar_over_time(metric[1m]) - {} 0 + {} 0 eval instant at 1m stddev_over_time(metric[1m]) - {} 0 + {} 0 # Tests for mad_over_time. clear load 10s - metric 4 6 2 1 999 1 2 + metric 4 6 2 1 999 1 2 + metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5 + metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} eval instant at 70s mad_over_time(metric[70s]) - {} 1 + {} 1 + +eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s]) + #empty + +eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) + {type="mix"} 0 # Tests for quantile_over_time clear @@ -966,6 +989,8 @@ load 10s data{test="two samples"} 0 1 data{test="three samples"} 0 1 2 data{test="uneven samples"} 0 1 4 + data_histogram{test="only histogram samples"} {{schema:0 sum:1 count:2}}x4 + data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2 eval instant at 1m quantile_over_time(0, data[2m]) {test="two samples"} 0 @@ -1007,6 +1032,12 @@ eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) {test="three samples"} +Inf {test="uneven samples"} +Inf +eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m]) + #empty + +eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) + {test="mix samples"} 1 + clear # Test time-related functions. 
@@ -1120,15 +1151,17 @@ load 5m eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) -# Tests for *_over_time clear +# Tests for *_over_time load 10s data{type="numbers"} 2 0 3 data{type="some_nan"} 2 0 NaN data{type="some_nan2"} 2 NaN 1 data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN + data_histogram{type="only_histogram"} {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} {{schema:0 sum:3 count:4}} + data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 @@ -1137,6 +1170,12 @@ eval instant at 1m min_over_time(data[2m]) {type="some_nan3"} 0 {type="only_nan"} NaN +eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) + {type="mix_samples"} 0 + eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 @@ -1144,12 +1183,29 @@ eval instant at 1m max_over_time(data[2m]) {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[2m]) +eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) + {type="mix_samples"} 1 + +eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 data{type="some_nan3"} 1 data{type="only_nan"} NaN + data_histogram{type="only_histogram"} {{schema:0 sum:3 count:4}} + data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}} + +eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m]) + {type="numbers"} 3 + {type="some_nan"} 3 + {type="some_nan2"} 3 + {type="some_nan3"} 3 + {type="only_nan"} 3 + {type="only_histogram"} 3 + {type="mix_samples"} 4 clear From 6339989e25102f37a57030f4338b4850c8c5b30e Mon Sep 17 00:00:00 2001 From: Vandit Singh 
<107131545+Vandit1604@users.noreply.github.com> Date: Thu, 9 Jan 2025 21:57:39 +0530 Subject: [PATCH 1208/1397] web/api: Add a limit parameter to /query and /query_range (#15552) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add limit param to query and rangeQuery --------- Signed-off-by: Vandit Singh Signed-off-by: Vandit Singh <107131545+Vandit1604@users.noreply.github.com> Co-authored-by: Björn Rabenstein --- docs/querying/api.md | 2 + web/api/v1/api.go | 53 +++++++++- web/api/v1/api_test.go | 228 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 280 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index f1e7129303..e3f97886dc 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -86,6 +86,7 @@ URL query parameters: - `time=`: Evaluation timestamp. Optional. - `timeout=`: Evaluation timeout. Optional. Defaults to and is capped by the value of the `-query.timeout` flag. +- `limit=`: Maximum number of returned series. Doesn’t affect scalars or strings but truncates the number of series for matrices and vectors. Optional. 0 means disabled. The current server time is used if the `time` parameter is omitted. @@ -154,6 +155,7 @@ URL query parameters: - `step=`: Query resolution step width in `duration` format or float number of seconds. - `timeout=`: Evaluation timeout. Optional. Defaults to and is capped by the value of the `-query.timeout` flag. +- `limit=`: Maximum number of returned series. Optional. 0 means disabled. You can URL-encode these parameters directly in the request body by using the `POST` method and `Content-Type: application/x-www-form-urlencoded` header. 
This is useful when specifying a large diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 6e9c589087..4903f925cc 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -438,6 +438,10 @@ func (api *API) options(*http.Request) apiFuncResult { } func (api *API) query(r *http.Request) (result apiFuncResult) { + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } ts, err := parseTimeParam(r, "time", api.now()) if err != nil { return invalidParamError(err, "time") @@ -479,6 +483,15 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close} } + warnings := res.Warnings + if limit > 0 { + var isTruncated bool + + res, isTruncated = truncateResults(res, limit) + if isTruncated { + warnings = warnings.Add(errors.New("results truncated due to limit")) + } + } // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { @@ -490,7 +503,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { ResultType: res.Value.Type(), Result: res.Value, Stats: qs, - }, nil, res.Warnings, qry.Close} + }, nil, warnings, qry.Close} } func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { @@ -526,6 +539,10 @@ func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } start, err := parseTime(r.FormValue("start")) if err != nil { return invalidParamError(err, "start") @@ -590,6 +607,16 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close} } + warnings := res.Warnings + if limit > 0 { + var isTruncated bool + + res, isTruncated = truncateResults(res, limit) + if isTruncated { + warnings 
= warnings.Add(errors.New("results truncated due to limit")) + } + } + // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { @@ -601,7 +628,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { ResultType: res.Value.Type(), Result: res.Value, Stats: qs, - }, nil, res.Warnings, qry.Close} + }, nil, warnings, qry.Close} } func (api *API) queryExemplars(r *http.Request) apiFuncResult { @@ -2102,3 +2129,25 @@ func toHintLimit(limit int) int { } return limit } + +// truncateResults truncates result for queryRange() and query(). +// No truncation for other types(Scalars or Strings). +func truncateResults(result *promql.Result, limit int) (*promql.Result, bool) { + isTruncated := false + + switch v := result.Value.(type) { + case promql.Matrix: + if len(v) > limit { + result.Value = v[:limit] + isTruncated = true + } + case promql.Vector: + if len(v) > limit { + result.Value = v[:limit] + isTruncated = true + } + } + + // Return the modified result. Unchanged for other types. + return result, isTruncated +} diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 175ed2e0f0..e6ca43508b 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1164,6 +1164,49 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + // Only matrix and vector responses are limited/truncated. String and scalar responses aren't truncated. + { + endpoint: api.query, + query: url.Values{ + "query": []string{"2"}, + "time": []string{"123.4"}, + "limit": []string{"1"}, + }, + response: &QueryData{ + ResultType: parser.ValueTypeScalar, + Result: promql.Scalar{ + V: 2, + T: timestamp.FromTime(start.Add(123*time.Second + 400*time.Millisecond)), + }, + }, + warningsCount: 0, + }, + // When limit = 0, limit is disabled. 
+ { + endpoint: api.query, + query: url.Values{ + "query": []string{"2"}, + "time": []string{"123.4"}, + "limit": []string{"0"}, + }, + response: &QueryData{ + ResultType: parser.ValueTypeScalar, + Result: promql.Scalar{ + V: 2, + T: timestamp.FromTime(start.Add(123*time.Second + 400*time.Millisecond)), + }, + }, + warningsCount: 0, + }, + { + endpoint: api.query, + query: url.Values{ + "query": []string{"2"}, + "time": []string{"123.4"}, + "limit": []string{"-1"}, + }, + errType: errorBadData, + }, { endpoint: api.query, query: url.Values{ @@ -1205,6 +1248,179 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, }, }, + { + endpoint: api.query, + query: url.Values{ + "query": []string{ + `label_replace(vector(42), "foo", "bar", "", "") or label_replace(vector(3.1415), "dings", "bums", "", "")`, + }, + "time": []string{"123.4"}, + "limit": []string{"2"}, + }, + warningsCount: 0, + responseAsJSON: `{ + "resultType": "vector", + "result": [ + { + "metric": { + "foo": "bar" + }, + "value": [123.4, "42"] + }, + { + "metric": { + "dings": "bums" + }, + "value": [123.4, "3.1415"] + } + ] + }`, + }, + { + endpoint: api.query, + query: url.Values{ + "query": []string{ + `label_replace(vector(42), "foo", "bar", "", "") or label_replace(vector(3.1415), "dings", "bums", "", "")`, + }, + "time": []string{"123.4"}, + "limit": []string{"1"}, + }, + warningsCount: 1, + responseAsJSON: `{ + "resultType": "vector", + "result": [ + { + "metric": { + "foo": "bar" + }, + "value": [123.4, "42"] + } + ] + }`, + }, + { + endpoint: api.query, + query: url.Values{ + "query": []string{ + `label_replace(vector(42), "foo", "bar", "", "") or label_replace(vector(3.1415), "dings", "bums", "", "")`, + }, + "time": []string{"123.4"}, + "limit": []string{"0"}, + }, + responseAsJSON: `{ + "resultType": "vector", + "result": [ + { + "metric": { + "foo": "bar" + }, + "value": [123.4, "42"] + }, + { + "metric": { + "dings": "bums" + }, + "value": [123.4, "3.1415"] + } 
+ ] + }`, + warningsCount: 0, + }, + // limit=0 means no limit. + { + endpoint: api.queryRange, + query: url.Values{ + "query": []string{ + `label_replace(vector(42), "foo", "bar", "", "") or label_replace(vector(3.1415), "dings", "bums", "", "")`, + }, + "start": []string{"0"}, + "end": []string{"2"}, + "step": []string{"1"}, + "limit": []string{"0"}, + }, + response: &QueryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Metric: labels.FromMap(map[string]string{"dings": "bums"}), + Floats: []promql.FPoint{ + {F: 3.1415, T: timestamp.FromTime(start)}, + {F: 3.1415, T: timestamp.FromTime(start.Add(1 * time.Second))}, + {F: 3.1415, T: timestamp.FromTime(start.Add(2 * time.Second))}, + }, + }, + promql.Series{ + Metric: labels.FromMap(map[string]string{"foo": "bar"}), + Floats: []promql.FPoint{ + {F: 42, T: timestamp.FromTime(start)}, + {F: 42, T: timestamp.FromTime(start.Add(1 * time.Second))}, + {F: 42, T: timestamp.FromTime(start.Add(2 * time.Second))}, + }, + }, + }, + }, + warningsCount: 0, + }, + { + endpoint: api.queryRange, + query: url.Values{ + "query": []string{ + `label_replace(vector(42), "foo", "bar", "", "") or label_replace(vector(3.1415), "dings", "bums", "", "")`, + }, + "start": []string{"0"}, + "end": []string{"2"}, + "step": []string{"1"}, + "limit": []string{"1"}, + }, + response: &QueryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Metric: labels.FromMap(map[string]string{"dings": "bums"}), + Floats: []promql.FPoint{ + {F: 3.1415, T: timestamp.FromTime(start)}, + {F: 3.1415, T: timestamp.FromTime(start.Add(1 * time.Second))}, + {F: 3.1415, T: timestamp.FromTime(start.Add(2 * time.Second))}, + }, + }, + }, + }, + warningsCount: 1, + }, + { + endpoint: api.queryRange, + query: url.Values{ + "query": []string{ + `label_replace(vector(42), "foo", "bar", "", "") or label_replace(vector(3.1415), "dings", "bums", "", "")`, + }, + "start": []string{"0"}, + "end": 
[]string{"2"}, + "step": []string{"1"}, + "limit": []string{"2"}, + }, + response: &QueryData{ + ResultType: parser.ValueTypeMatrix, + Result: promql.Matrix{ + promql.Series{ + Metric: labels.FromMap(map[string]string{"dings": "bums"}), + Floats: []promql.FPoint{ + {F: 3.1415, T: timestamp.FromTime(start)}, + {F: 3.1415, T: timestamp.FromTime(start.Add(1 * time.Second))}, + {F: 3.1415, T: timestamp.FromTime(start.Add(2 * time.Second))}, + }, + }, + promql.Series{ + Metric: labels.FromMap(map[string]string{"foo": "bar"}), + Floats: []promql.FPoint{ + {F: 42, T: timestamp.FromTime(start)}, + {F: 42, T: timestamp.FromTime(start.Add(1 * time.Second))}, + {F: 42, T: timestamp.FromTime(start.Add(2 * time.Second))}, + }, + }, + }, + }, + warningsCount: 0, + }, { endpoint: api.queryRange, query: url.Values{ @@ -1222,7 +1438,6 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E {F: 1, T: timestamp.FromTime(start.Add(1 * time.Second))}, {F: 2, T: timestamp.FromTime(start.Add(2 * time.Second))}, }, - // No Metric returned - use zero value for comparison. }, }, }, @@ -1235,6 +1450,17 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }, responseAsJSON: `{"resultType":"vector","result":[]}`, }, + { + endpoint: api.queryRange, + query: url.Values{ + "query": []string{"bottomk(2, notExists)"}, + "start": []string{"0"}, + "end": []string{"2"}, + "step": []string{"1"}, + "limit": []string{"-1"}, + }, + errType: errorBadData, + }, // Test empty matrix result { endpoint: api.queryRange, From f030894c2cd124923030bd2cfa8a7b91a00544d6 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Thu, 9 Jan 2025 17:51:26 +0100 Subject: [PATCH 1209/1397] Fix issues raised by staticcheck (#15722) Fix issues raised by staticcheck We are not enabling staticcheck explicitly, though, because it has too many false positives. 
--------- Signed-off-by: Arve Knudsen --- cmd/prometheus/main.go | 6 +++--- cmd/promtool/main.go | 9 ++++----- promql/promqltest/test.go | 4 ++-- scrape/target.go | 4 ++-- storage/remote/metadata_watcher.go | 2 +- storage/remote/queue_manager.go | 2 +- tsdb/wlog/watcher.go | 2 +- web/api/v1/api.go | 2 +- web/api/v1/api_test.go | 2 +- 9 files changed, 16 insertions(+), 17 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 06f46f8d72..03c20dc52d 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -534,7 +534,7 @@ func main() { _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err)) + fmt.Fprintf(os.Stderr, "Error parsing command line arguments: %s\n", err) a.Usage(os.Args[1:]) os.Exit(2) } @@ -548,7 +548,7 @@ func main() { notifs.AddNotification(notifications.StartingUp) if err := cfg.setFeatureListOptions(logger); err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err)) + fmt.Fprintf(os.Stderr, "Error parsing feature list: %s\n", err) os.Exit(1) } @@ -1742,7 +1742,7 @@ func (s *readyStorage) WALReplayStatus() (tsdb.WALReplayStatus, error) { } // ErrNotReady is returned if the underlying scrape manager is not ready yet. -var ErrNotReady = errors.New("Scrape manager not ready") +var ErrNotReady = errors.New("scrape manager not ready") // ReadyScrapeManager allows a scrape manager to be retrieved. Even if it's set at a later point in time. 
type readyScrapeManager struct { diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index b52fe7cdbb..62a1d4f906 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -36,7 +36,7 @@ import ( "github.com/prometheus/client_golang/api" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil/promlint" - config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" @@ -45,7 +45,6 @@ import ( dto "github.com/prometheus/client_model/go" promconfig "github.com/prometheus/common/config" - "github.com/prometheus/common/expfmt" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" @@ -312,12 +311,12 @@ func main() { kingpin.Fatalf("Cannot set base auth in the server URL and use a http.config.file at the same time") } var err error - httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath) + httpConfig, _, err := promconfig.LoadHTTPConfigFile(httpConfigFilePath) if err != nil { kingpin.Fatalf("Failed to load HTTP config file: %v", err) } - httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version)) + httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", promconfig.WithUserAgent("promtool/"+version.Version)) if err != nil { kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err) } @@ -702,7 +701,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin return ruleFiles, nil } -func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error { +func checkTLSConfig(tlsConfig promconfig.TLSConfig, checkSyntaxOnly bool) error { if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 { return fmt.Errorf("client cert file %q specified without client key 
file", tlsConfig.CertFile) } diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 518164827a..5e0d9083cb 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -1419,8 +1419,8 @@ func (ll *LazyLoader) appendTill(ts int64) error { // WithSamplesTill loads the samples till given timestamp and executes the given function. func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) { - tsMilli := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond - fn(ll.appendTill(int64(tsMilli))) + till := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond + fn(ll.appendTill(int64(till))) } // QueryEngine returns the LazyLoader's query engine. diff --git a/scrape/target.go b/scrape/target.go index d05866f863..22cde01c05 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -295,12 +295,12 @@ func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Durati intervalLabel := t.labels.Get(model.ScrapeIntervalLabel) interval, err := model.ParseDuration(intervalLabel) if err != nil { - return defaultInterval, defaultDuration, fmt.Errorf("Error parsing interval label %q: %w", intervalLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("error parsing interval label %q: %w", intervalLabel, err) } timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel) timeout, err := model.ParseDuration(timeoutLabel) if err != nil { - return defaultInterval, defaultDuration, fmt.Errorf("Error parsing timeout label %q: %w", timeoutLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("error parsing timeout label %q: %w", timeoutLabel, err) } return time.Duration(interval), time.Duration(timeout), nil diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go index 9306dcb4c2..d7f376c96a 100644 --- a/storage/remote/metadata_watcher.go +++ b/storage/remote/metadata_watcher.go @@ -38,7 +38,7 @@ type Watchable interface { type noopScrapeManager struct{} func (noop *noopScrapeManager) Get() 
(*scrape.Manager, error) { - return nil, errors.New("Scrape manager not ready") + return nil, errors.New("scrape manager not ready") } // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 475c126eff..4b966059f6 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -2119,7 +2119,7 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } return compressed, nil default: - return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc) + return compressed, fmt.Errorf("unknown compression scheme [%v]", enc) } } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 6f1bc1df35..ca74a9ceaf 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -679,7 +679,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err // Ensure we read the whole contents of every segment in the checkpoint dir. 
segs, err := listSegments(checkpointDir) if err != nil { - return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) + return fmt.Errorf("unable to get segments checkpoint dir: %w", err) } for _, segRef := range segs { size, err := getSegmentSize(checkpointDir, segRef.index) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4903f925cc..ea7d5c5fe4 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -2043,7 +2043,7 @@ func parseTimeParam(r *http.Request, paramName string, defaultValue time.Time) ( } result, err := parseTime(val) if err != nil { - return time.Time{}, fmt.Errorf("Invalid time value for '%s': %w", paramName, err) + return time.Time{}, fmt.Errorf("invalid time value for '%s': %w", paramName, err) } return result, nil } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index e6ca43508b..37227d849d 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -4186,7 +4186,7 @@ func TestParseTimeParam(t *testing.T) { asTime: time.Time{}, asError: func() error { _, err := parseTime("baz") - return fmt.Errorf("Invalid time value for '%s': %w", "foo", err) + return fmt.Errorf("invalid time value for '%s': %w", "foo", err) }, }, }, From b880cea613577f50377832efcf801e0172d2ad1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Mon, 2 Oct 2023 14:47:00 +0100 Subject: [PATCH 1210/1397] Fix locks in db.reloadBlocks() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This partially reverts ae3d392aa9c3a5c5f92f8116738c5b32c98b09a7. ae3d392aa9c3a5c5f92f8116738c5b32c98b09a7 added a call to db.mtx.Lock() that lasts for the entire duration of db.reloadBlocks(), previous db.mtx would be locked only during critical part of db.reloadBlocks(). 
The motivation was to protect against races: https://github.com/prometheus/prometheus/pull/8007/commits/9e0351e161dcf60cd6d06437b6a2221805910498#r555699794 The 'reloads' being mentioned are (I think) reloadBlocks() calls, rather than db.reload() or other methods. TestTombstoneCleanRetentionLimitsRace was added to catch this but I wasn't able to ever get any error out of it, even after disabling all calls to db.mtx in reloadBlocks() and CleanTombstones(). To make things more complicated CleanupTombstones() itself calls reloadBlocks(), so it seems that the real issue is that we might have concurrent calls to reloadBlocks(). The problem with this change is that db.reloadBlocks() can take a very long time, that's because it might need to load very large blocks from disk, which is slow. While db.mtx is locked a large chunk of the db is locked, including queries, since db.mtx read lock is needed for db.Querier() call. One of the issues this manifests itself as is a gap in all metrics and blocked queries just after a large block compaction happens. When compaction merges multiple day-or-more blocks into a week-or-more block it create a single very big block. After that block is written it needs to be loaded and that seems to be taking many seconds (30-45), during which mtx is held and everything is blocked. Turns out that there is another lock that is more fine grained and aimed at this specific use case: // cmtx ensures that compactions and deletions don't run simultaneously. cmtx sync.Mutex All calls to reloadBlocks() are wrapped inside cmtx lock. The only exception is db.reload() which this change fixes. We can't add cmtx lock inside reloadBlocks() itself because it's called by a number of functions, some of which are already holding cmtx. Looking at the code I think it is sufficient to hold cmtx and skip a reloadBlocks() wide mtx call. 
Signed-off-by: Łukasz Mierzwa --- tsdb/db.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index 2d35e3fb00..ea8cbfff0b 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -992,9 +992,14 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn db.metrics.maxBytes.Set(float64(maxBytes)) db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds()) + // Calling db.reload() calls db.reloadBlocks() which requires cmtx to be locked. + db.cmtx.Lock() if err := db.reload(); err != nil { + db.cmtx.Unlock() return nil, err } + db.cmtx.Unlock() + // Set the min valid time for the ingested samples // to be no lower than the maxt of the last block. minValidTime := int64(math.MinInt64) @@ -1568,13 +1573,9 @@ func (db *DB) reloadBlocks() (err error) { db.metrics.reloads.Inc() }() - // Now that we reload TSDB every minute, there is a high chance for a race condition with a reload - // triggered by CleanTombstones(). We need to lock the reload to avoid the situation where - // a normal reload and CleanTombstones try to delete the same block. - db.mtx.Lock() - defer db.mtx.Unlock() - + db.mtx.RLock() loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool, db.opts.PostingsDecoderFactory) + db.mtx.RUnlock() if err != nil { return err } @@ -1643,8 +1644,10 @@ func (db *DB) reloadBlocks() (err error) { }) // Swap new blocks first for subsequently created readers to be seen. + db.mtx.Lock() oldBlocks := db.blocks db.blocks = toLoad + db.mtx.Unlock() // Only check overlapping blocks when overlapping compaction is enabled. 
if db.opts.EnableOverlappingCompaction { From 92788d313ad15cf80190eb6113bb01ff5e4f9828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Mon, 2 Oct 2023 16:05:34 +0100 Subject: [PATCH 1211/1397] Remove TestTombstoneCleanRetentionLimitsRace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This test ensures that running db.reloadBlocks() and db.CleanTombstones() at the same time doesn't race. The problem is that CleanTombstones() is a public method while reloadBlocks() is internal. CleanTombstones() sets db.cmtx lock while reloadBlocks() is not protected by any locks at all, it expects the public method through which it was called to do it. So having a race between these two is not unexpected and we shouldn't really be testing this. db.cmtx ensures that no other function can be modifying the list of open blocks and so the scenario tested here cannot happen. If it would happen it would be only because some other method doesn't aquire db.ctmx lock, something this test cannot detect. Signed-off-by: Łukasz Mierzwa --- tsdb/db_test.go | 55 ------------------------------------------------- 1 file changed, 55 deletions(-) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index b858e6f524..a2861b7bf0 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1350,61 +1350,6 @@ func TestTombstoneCleanFail(t *testing.T) { require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1) } -// TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation -// and retention limit policies, when triggered at the same time, -// won't race against each other. -func TestTombstoneCleanRetentionLimitsRace(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - opts := DefaultOptions() - var wg sync.WaitGroup - - // We want to make sure that a race doesn't happen when a normal reload and a CleanTombstones() - // reload try to delete the same block. 
Without the correct lock placement, it can happen if a - // block is marked for deletion due to retention limits and also has tombstones to be cleaned at - // the same time. - // - // That is something tricky to trigger, so let's try several times just to make sure. - for i := 0; i < 20; i++ { - t.Run(fmt.Sprintf("iteration%d", i), func(t *testing.T) { - db := openTestDB(t, opts, nil) - totalBlocks := 20 - dbDir := db.Dir() - // Generate some blocks with old mint (near epoch). - for j := 0; j < totalBlocks; j++ { - blockDir := createBlock(t, dbDir, genSeries(10, 1, int64(j), int64(j)+1)) - block, err := OpenBlock(nil, blockDir, nil, nil) - require.NoError(t, err) - // Cover block with tombstones so it can be deleted with CleanTombstones() as well. - tomb := tombstones.NewMemTombstones() - tomb.AddInterval(0, tombstones.Interval{Mint: int64(j), Maxt: int64(j) + 1}) - block.tombstones = tomb - - db.blocks = append(db.blocks, block) - } - - wg.Add(2) - // Run reload and CleanTombstones together, with a small time window randomization - go func() { - defer wg.Done() - time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond))) - require.NoError(t, db.reloadBlocks()) - }() - go func() { - defer wg.Done() - time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond))) - require.NoError(t, db.CleanTombstones()) - }() - - wg.Wait() - - require.NoError(t, db.Close()) - }) - } -} - func intersection(oldBlocks, actualBlocks []string) (intersection []string) { hash := make(map[string]bool) for _, e := range oldBlocks { From d106b3beb7f9069bbf00c63e7b6fcccceb97ffb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Mierzwa?= Date: Wed, 18 Oct 2023 09:29:59 +0100 Subject: [PATCH 1212/1397] Wrap db.blocks read in a read lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't hold db.mtx lock when trying to read db.blocks here so we need a read lock around this loop. 
Signed-off-by: Łukasz Mierzwa --- tsdb/db.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tsdb/db.go b/tsdb/db.go index ea8cbfff0b..001515081d 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -1518,7 +1518,9 @@ func (db *DB) compactBlocks() (err error) { default: } + db.mtx.RLock() uids, err := db.compactor.Compact(db.dir, plan, db.blocks) + db.mtx.RUnlock() if err != nil { return fmt.Errorf("compact %s: %w", plan, err) } @@ -1601,11 +1603,13 @@ func (db *DB) reloadBlocks() (err error) { if len(corrupted) > 0 { // Corrupted but no child loaded for it. // Close all new blocks to release the lock for windows. + db.mtx.RLock() for _, block := range loadable { if _, open := getBlock(db.blocks, block.Meta().ULID); !open { block.Close() } } + db.mtx.RUnlock() errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { if err != nil { From a1740cd2e799807922d50f60b71d96aa6e3a1f09 Mon Sep 17 00:00:00 2001 From: Lukasz Mierzwa Date: Wed, 28 Aug 2024 10:47:40 +0100 Subject: [PATCH 1213/1397] Remove unnecessary locks Compact() is an uppercase function that deals with locks on its own, so we shouldn't have a lock around it. 
Signed-off-by: Lukasz Mierzwa --- tsdb/db.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index 001515081d..ccb0fa62bd 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -1518,9 +1518,7 @@ func (db *DB) compactBlocks() (err error) { default: } - db.mtx.RLock() uids, err := db.compactor.Compact(db.dir, plan, db.blocks) - db.mtx.RUnlock() if err != nil { return fmt.Errorf("compact %s: %w", plan, err) } From e3728122b2ae1f35f4a7f9322f1bac0aadd44fcc Mon Sep 17 00:00:00 2001 From: Lukasz Mierzwa Date: Thu, 9 Jan 2025 17:20:10 +0000 Subject: [PATCH 1214/1397] Update comments for methods that require a lock Signed-off-by: Lukasz Mierzwa --- tsdb/db.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tsdb/db.go b/tsdb/db.go index ccb0fa62bd..9ab150c5b4 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -1368,6 +1368,7 @@ func (db *DB) CompactOOOHead(ctx context.Context) error { // Callback for testing. var compactOOOHeadTestingCallback func() +// The db.cmtx mutex should be held before calling this method. func (db *DB) compactOOOHead(ctx context.Context) error { if !db.oooWasEnabled.Load() { return nil @@ -1422,6 +1423,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error { // compactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given. // Each ULID in the result corresponds to a block in a unique time range. +// The db.cmtx mutex should be held before calling this method. func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID, err error) { start := time.Now() @@ -1466,7 +1468,7 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } // compactHead compacts the given RangeHead. -// The compaction mutex should be held before calling this method. +// The db.cmtx should be held before calling this method. 
func (db *DB) compactHead(head *RangeHead) error { uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { @@ -1492,7 +1494,7 @@ func (db *DB) compactHead(head *RangeHead) error { } // compactBlocks compacts all the eligible on-disk blocks. -// The compaction mutex should be held before calling this method. +// The db.cmtx should be held before calling this method. func (db *DB) compactBlocks() (err error) { // Check for compactions of multiple blocks. for { @@ -1549,6 +1551,7 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) { } // reload reloads blocks and truncates the head and its WAL. +// The db.cmtx mutex should be held before calling this method. func (db *DB) reload() error { if err := db.reloadBlocks(); err != nil { return fmt.Errorf("reloadBlocks: %w", err) @@ -1565,6 +1568,7 @@ func (db *DB) reload() error { // reloadBlocks reloads blocks without touching head. // Blocks that are obsolete due to replacement or retention will be deleted. +// The db.cmtx mutex should be held before calling this method. 
func (db *DB) reloadBlocks() (err error) { defer func() { if err != nil { From 0a19f1268e2dbb9bae9568f007a9809e8fba0402 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Thu, 9 Jan 2025 18:14:21 -0500 Subject: [PATCH 1215/1397] Rule Concurrency: Simpler loop for sequential (default) executions (#15801) --- rules/group.go | 50 +++++++++++++++++++++++++++++-------------- rules/manager.go | 6 +----- rules/manager_test.go | 7 +----- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/rules/group.go b/rules/group.go index 724b926d4f..4398d9211d 100644 --- a/rules/group.go +++ b/rules/group.go @@ -643,28 +643,46 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if ctrl == nil { ctrl = sequentialRuleEvalController{} } - for _, batch := range ctrl.SplitGroupIntoBatches(ctx, g) { - for _, ruleIndex := range batch { + + batches := ctrl.SplitGroupIntoBatches(ctx, g) + if len(batches) == 0 { + // Sequential evaluation when batches aren't set. + // This is the behaviour without a defined RuleConcurrencyController + for i, rule := range g.rules { + // Check if the group has been stopped. select { case <-g.done: return default: } - - rule := g.rules[ruleIndex] - if len(batch) > 1 && ctrl.Allow(ctx, g, rule) { - wg.Add(1) - - go eval(ruleIndex, rule, func() { - wg.Done() - ctrl.Done(ctx) - }) - } else { - eval(ruleIndex, rule, nil) - } + eval(i, rule, nil) + } + } else { + // Concurrent evaluation. + for _, batch := range batches { + for _, ruleIndex := range batch { + // Check if the group has been stopped. + select { + case <-g.done: + wg.Wait() + return + default: + } + rule := g.rules[ruleIndex] + if len(batch) > 1 && ctrl.Allow(ctx, g, rule) { + wg.Add(1) + + go eval(ruleIndex, rule, func() { + wg.Done() + ctrl.Done(ctx) + }) + } else { + eval(ruleIndex, rule, nil) + } + } + // It is important that we finish processing any rules in this current batch - before we move into the next one. 
+ wg.Wait() } - // It is important that we finish processing any rules in this current batch - before we move into the next one. - wg.Wait() } g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load()) diff --git a/rules/manager.go b/rules/manager.go index c4c0f8a1ef..a3ae716e2b 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -550,11 +550,7 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) } func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { - order := make([]ConcurrentRules, len(g.rules)) - for i := range g.rules { - order[i] = []int{i} - } - return order + return nil } func (c sequentialRuleEvalController) Done(_ context.Context) {} diff --git a/rules/manager_test.go b/rules/manager_test.go index 5c3fcc96a8..45da7a44b0 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1989,12 +1989,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { // Expected evaluation order order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group) - require.Equal(t, []ConcurrentRules{ - {0}, - {1}, - {2}, - {3}, - }, order) + require.Nil(t, order) // Never expect more than 1 inflight query at a time. 
require.EqualValues(t, 1, maxInflight.Load()) From 616914abe22ae745349b1891ba8065c2fd4abf4b Mon Sep 17 00:00:00 2001 From: crystalstall Date: Mon, 6 Jan 2025 16:13:18 +0800 Subject: [PATCH 1216/1397] Signed-off-by: crystalstall refactor: using slices.Contains to simplify the code Signed-off-by: crystalstall --- discovery/consul/consul.go | 8 ++------ tsdb/block.go | 13 ++----------- util/testutil/port.go | 8 ++------ 3 files changed, 6 insertions(+), 23 deletions(-) diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 33b82d23a4..4c8de6e291 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -19,6 +19,7 @@ import ( "fmt" "log/slog" "net" + "slices" "strconv" "strings" "time" @@ -248,12 +249,7 @@ func (d *Discovery) shouldWatchFromName(name string) bool { return true } - for _, sn := range d.watchedServices { - if sn == name { - return true - } - } - return false + return slices.Contains(d.watchedServices, name) } // shouldWatchFromTags returns whether the service of the given name should be watched based on its tags. 
diff --git a/tsdb/block.go b/tsdb/block.go index 6483f8d8bb..4ffd2463c3 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -221,7 +221,7 @@ type BlockMetaCompaction struct { } func (bm *BlockMetaCompaction) SetOutOfOrder() { - if bm.containsHint(CompactionHintFromOutOfOrder) { + if bm.FromOutOfOrder() { return } bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder) @@ -229,16 +229,7 @@ func (bm *BlockMetaCompaction) SetOutOfOrder() { } func (bm *BlockMetaCompaction) FromOutOfOrder() bool { - return bm.containsHint(CompactionHintFromOutOfOrder) -} - -func (bm *BlockMetaCompaction) containsHint(hint string) bool { - for _, h := range bm.Hints { - if h == hint { - return true - } - } - return false + return slices.Contains(bm.Hints, CompactionHintFromOutOfOrder) } const ( diff --git a/util/testutil/port.go b/util/testutil/port.go index 7cf4cf1ccc..91c1291749 100644 --- a/util/testutil/port.go +++ b/util/testutil/port.go @@ -15,6 +15,7 @@ package testutil import ( "net" + "slices" "sync" "testing" ) @@ -48,12 +49,7 @@ func RandomUnprivilegedPort(t *testing.T) int { } func portWasUsed(port int) bool { - for _, usedPort := range usedPorts { - if port == usedPort { - return true - } - } - return false + return slices.Contains(usedPorts, port) } func getPort() (int, error) { From 47563d942eb459071b91050be2d1d1dcca73d64b Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Fri, 10 Jan 2025 15:19:45 -0500 Subject: [PATCH 1217/1397] parser: fix misleading error message in grouping processing Signed-off-by: Owen Williams --- promql/parser/generated_parser.y | 9 +++++---- promql/parser/generated_parser.y.go | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 3865dc6548..ca710b1ab0 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -363,17 +363,18 @@ grouping_label_list: grouping_label : maybe_label { if !model.LabelName($1.Val).IsValid() { - 
yylex.(*parser).unexpected("grouping opts", "label") + yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val) } $$ = $1 } | STRING { - if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + unquoted := yylex.(*parser).unquoteString($1.Val) + if !model.LabelName(unquoted).IsValid() { + yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted) } $$ = $1 $$.Pos++ - $$.Val = yylex.(*parser).unquoteString($$.Val) + $$.Val = unquoted } | error { yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 7ff8591169..04bc081f2f 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -1259,19 +1259,20 @@ yydefault: yyDollar = yyS[yypt-1 : yypt+1] { if !model.LabelName(yyDollar[1].item.Val).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val) } yyVAL.item = yyDollar[1].item } case 59: yyDollar = yyS[yypt-1 : yypt+1] { - if !model.LabelName(yylex.(*parser).unquoteString(yyDollar[1].item.Val)).IsValid() { - yylex.(*parser).unexpected("grouping opts", "label") + unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val) + if !model.LabelName(unquoted).IsValid() { + yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted) } yyVAL.item = yyDollar[1].item yyVAL.item.Pos++ - yyVAL.item.Val = yylex.(*parser).unquoteString(yyVAL.item.Val) + yyVAL.item.Val = unquoted } case 60: yyDollar = yyS[yypt-1 : yypt+1] From 6ccd9add1e023f84589d1ca2e18b8c5256bf47a0 Mon Sep 17 00:00:00 2001 From: leonnicolas Date: Fri, 10 Jan 2025 23:32:05 +0100 Subject: [PATCH 1218/1397] Make "hide empty rules persistent" It can 
be a bit annoying to always press "hide empty rules". This commit uses the session storage of the browser to make it persistent. Signed-off-by: leonnicolas --- .../src/components/SettingsMenu.tsx | 54 +++++++++------ web/ui/mantine-ui/src/pages/AlertsPage.tsx | 69 +++++++++++-------- .../src/state/localStorageMiddleware.ts | 5 +- web/ui/mantine-ui/src/state/settingsSlice.ts | 14 ++-- 4 files changed, 89 insertions(+), 53 deletions(-) diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index aae38909d0..79d9c201cc 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -5,7 +5,7 @@ import { Checkbox, Stack, Group, - NumberInput, + NumberInput } from "@mantine/core"; import { IconSettings } from "@tabler/icons-react"; import { FC } from "react"; @@ -21,8 +21,9 @@ const SettingsMenu: FC = () => { enableSyntaxHighlighting, enableLinter, showAnnotations, + hideEmptyGroups, ruleGroupsPerPage, - alertGroupsPerPage, + alertGroupsPerPage } = useSettings(); const dispatch = useAppDispatch(); @@ -48,7 +49,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - useLocalTime: event.currentTarget.checked, + useLocalTime: event.currentTarget.checked }) ) } @@ -63,7 +64,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableQueryHistory: event.currentTarget.checked, + enableQueryHistory: event.currentTarget.checked }) ) } @@ -74,7 +75,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableAutocomplete: event.currentTarget.checked, + enableAutocomplete: event.currentTarget.checked }) ) } @@ -85,7 +86,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableSyntaxHighlighting: event.currentTarget.checked, + enableSyntaxHighlighting: event.currentTarget.checked }) ) } @@ -96,7 +97,7 @@ const SettingsMenu: FC = () 
=> { onChange={(event) => dispatch( updateSettings({ - enableLinter: event.currentTarget.checked, + enableLinter: event.currentTarget.checked }) ) } @@ -107,17 +108,30 @@ const SettingsMenu: FC = () => {
    - - dispatch( - updateSettings({ - showAnnotations: event.currentTarget.checked, - }) - ) - } - /> + + + dispatch( + updateSettings({ + showAnnotations: event.currentTarget.checked + }) + ) + } + /> + + dispatch( + updateSettings({ + hideEmptyGroups: event.currentTarget.checked + }) + ) + } + /> +
    { dispatch( updateSettings({ - alertGroupsPerPage: value, + alertGroupsPerPage: value }) ); }} @@ -151,7 +165,7 @@ const SettingsMenu: FC = () => { dispatch( updateSettings({ - ruleGroupsPerPage: value, + ruleGroupsPerPage: value }) ); }} diff --git a/web/ui/mantine-ui/src/pages/AlertsPage.tsx b/web/ui/mantine-ui/src/pages/AlertsPage.tsx index 3143f0b41d..33e170556d 100644 --- a/web/ui/mantine-ui/src/pages/AlertsPage.tsx +++ b/web/ui/mantine-ui/src/pages/AlertsPage.tsx @@ -11,7 +11,7 @@ import { Alert, TextInput, Anchor, - Pagination, + Pagination } from "@mantine/core"; import { useSuspenseAPIQuery } from "../api/api"; import { AlertingRule, AlertingRulesResult } from "../api/responseTypes/rules"; @@ -23,14 +23,14 @@ import { Fragment, useEffect, useMemo } from "react"; import { StateMultiSelect } from "../components/StateMultiSelect"; import { IconInfoCircle, IconSearch } from "@tabler/icons-react"; import { LabelBadges } from "../components/LabelBadges"; -import { useSettings } from "../state/settingsSlice"; +import { useSettings, updateSettings } from "../state/settingsSlice"; +import { useAppDispatch } from "../state/hooks"; import { ArrayParam, - BooleanParam, NumberParam, StringParam, useQueryParam, - withDefault, + withDefault } from "use-query-params"; import { useDebouncedValue } from "@mantine/hooks"; import { KVSearch } from "@nexucis/kvsearch"; @@ -67,7 +67,7 @@ type AlertsPageData = { const kvSearch = new KVSearch({ shouldSort: true, - indexedKeys: ["name", "labels", ["labels", /.*/]], + indexedKeys: ["name", "labels", ["labels", /.*/]] }); const buildAlertsPageData = ( @@ -79,9 +79,9 @@ const buildAlertsPageData = ( globalCounts: { inactive: 0, pending: 0, - firing: 0, + firing: 0 }, - groups: [], + groups: [] }; for (const group of data.groups) { @@ -89,7 +89,7 @@ const buildAlertsPageData = ( total: 0, inactive: 0, pending: 0, - firing: 0, + firing: 0 }; for (const r of group.rules) { @@ -126,9 +126,9 @@ const buildAlertsPageData = ( rule: r, 
counts: { firing: r.alerts.filter((a) => a.state === "firing").length, - pending: r.alerts.filter((a) => a.state === "pending").length, - }, - })), + pending: r.alerts.filter((a) => a.state === "pending").length + } + })) }); } @@ -146,11 +146,12 @@ export default function AlertsPage() { const { data } = useSuspenseAPIQuery({ path: `/rules`, params: { - type: "alert", - }, + type: "alert" + } }); - const { showAnnotations } = useSettings(); + const { hideEmptyGroups, showAnnotations } = useSettings(); + const dispatch = useAppDispatch(); // Define URL query params. const [stateFilter, setStateFilter] = useQueryParam( @@ -162,10 +163,6 @@ export default function AlertsPage() { withDefault(StringParam, "") ); const [debouncedSearch] = useDebouncedValue(searchFilter.trim(), 250); - const [showEmptyGroups, setShowEmptyGroups] = useQueryParam( - "showEmptyGroups", - withDefault(BooleanParam, true) - ); const { alertGroupsPerPage } = useSettings(); const [activePage, setActivePage] = useQueryParam( @@ -181,10 +178,10 @@ export default function AlertsPage() { const shownGroups = useMemo( () => - showEmptyGroups + !hideEmptyGroups ? alertsPageData.groups : alertsPageData.groups.filter((g) => g.rules.length > 0), - [alertsPageData.groups, showEmptyGroups] + [alertsPageData.groups, hideEmptyGroups] ); // If we were e.g. 
on page 10 and the number of total pages decreases to 5 (due to filtering @@ -255,7 +252,13 @@ export default function AlertsPage() { setShowEmptyGroups(false)} + onClick={() => { + dispatch( + updateSettings({ + hideEmptyGroups: true + }) + ); + }} > Hide empty groups @@ -267,7 +270,13 @@ export default function AlertsPage() { setShowEmptyGroups(false)} + onClick={() => { + dispatch( + updateSettings({ + hideEmptyGroups: true + }) + ); + }} > Hide empty groups @@ -286,8 +295,8 @@ export default function AlertsPage() { // have a different background color than their surrounding group card in dark mode, // but it would be better to use CSS to override the light/dark colors for // collapsed/expanded accordion items. - backgroundColor: "#c0c0c015", - }, + backgroundColor: "#c0c0c015" + } }} key={j} value={j.toString()} @@ -403,7 +412,7 @@ export default function AlertsPage() { )} )), - [currentPageGroups, showAnnotations, setShowEmptyGroups] + [currentPageGroups, showAnnotations, dispatch] ); return ( @@ -442,7 +451,7 @@ export default function AlertsPage() { No rules found. ) : ( - !showEmptyGroups && + hideEmptyGroups && alertsPageData.groups.length !== shownGroups.length && ( Hiding {alertsPageData.groups.length - shownGroups.length} empty groups due to filters or no rules. 
- setShowEmptyGroups(true)}> + + dispatch(updateSettings({ hideEmptyGroups: false })) + } + > Show empty groups diff --git a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts index 79baa5ac64..b3dc70662b 100644 --- a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts +++ b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts @@ -37,7 +37,7 @@ startAppListening({ effect: ({ payload }) => { persistToLocalStorage( localStorageKeyServiceDiscoveryPageCollapsedPools, - payload + payload, ); }, }); @@ -47,7 +47,7 @@ startAppListening({ effect: (_, { getState }) => { persistToLocalStorage( localStorageKeyQueryHistory, - getState().queryPage.queryHistory + getState().queryPage.queryHistory, ); }, }); @@ -62,6 +62,7 @@ startAppListening({ case "enableAutocomplete": case "enableSyntaxHighlighting": case "enableLinter": + case "hideEmptyGroups": case "showAnnotations": case "ruleGroupsPerPage": return persistToLocalStorage(`settings.${key}`, value); diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts index c4154b7253..da1c1546bb 100644 --- a/web/ui/mantine-ui/src/state/settingsSlice.ts +++ b/web/ui/mantine-ui/src/state/settingsSlice.ts @@ -13,6 +13,7 @@ interface Settings { enableAutocomplete: boolean; enableSyntaxHighlighting: boolean; enableLinter: boolean; + hideEmptyGroups: boolean; showAnnotations: boolean; ruleGroupsPerPage: number; alertGroupsPerPage: number; @@ -30,6 +31,7 @@ export const localStorageKeyEnableAutocomplete = "settings.enableAutocomplete"; export const localStorageKeyEnableSyntaxHighlighting = "settings.enableSyntaxHighlighting"; export const localStorageKeyEnableLinter = "settings.enableLinter"; +export const localStorageKeyHideEmptyGroups = "settings.hideEmptyGroups"; export const localStorageKeyShowAnnotations = "settings.showAnnotations"; export const localStorageKeyRuleGroupsPerPage = "settings.ruleGroupsPerPage"; export const 
localStorageKeyAlertGroupsPerPage = "settings.alertGroupsPerPage"; @@ -53,7 +55,7 @@ const getPathPrefix = (path: string) => { "/flags", "/config", "/alertmanager-discovery", - "/agent", + "/agent" ]; const pagePath = pagePaths.find((p) => path.endsWith(p)); @@ -95,6 +97,10 @@ export const initialState: Settings = { localStorageKeyEnableLinter, true ), + hideEmptyGroups: initializeFromLocalStorage( + localStorageKeyHideEmptyGroups, + false + ), showAnnotations: initializeFromLocalStorage( localStorageKeyShowAnnotations, true @@ -106,7 +112,7 @@ export const initialState: Settings = { alertGroupsPerPage: initializeFromLocalStorage( localStorageKeyAlertGroupsPerPage, 10 - ), + ) }; export const settingsSlice = createSlice({ @@ -115,8 +121,8 @@ export const settingsSlice = createSlice({ reducers: { updateSettings: (state, { payload }: PayloadAction>) => { Object.assign(state, payload); - }, - }, + } + } }); export const { updateSettings } = settingsSlice.actions; From b3531a12f333cdf0b290f60898447eb8681f151a Mon Sep 17 00:00:00 2001 From: leonnicolas Date: Mon, 13 Jan 2025 09:37:44 +0100 Subject: [PATCH 1219/1397] Make "show empty pools" setting persistent Just like for showing empty groups on the Alerts page, also make the setting for showing empty pools on the Targets page persistent. 
Signed-off-by: leonnicolas --- .../src/components/SettingsMenu.tsx | 36 +++++++++++++------ web/ui/mantine-ui/src/pages/AlertsPage.tsx | 30 ++++++++-------- .../src/pages/targets/ScrapePoolsList.tsx | 21 ++++++----- .../src/state/localStorageMiddleware.ts | 5 +-- web/ui/mantine-ui/src/state/settingsSlice.ts | 14 +++++--- 5 files changed, 66 insertions(+), 40 deletions(-) diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index 79d9c201cc..d7fe458ca5 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -5,7 +5,7 @@ import { Checkbox, Stack, Group, - NumberInput + NumberInput, } from "@mantine/core"; import { IconSettings } from "@tabler/icons-react"; import { FC } from "react"; @@ -23,7 +23,8 @@ const SettingsMenu: FC = () => { showAnnotations, hideEmptyGroups, ruleGroupsPerPage, - alertGroupsPerPage + alertGroupsPerPage, + showEmptyPools, } = useSettings(); const dispatch = useAppDispatch(); @@ -49,7 +50,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - useLocalTime: event.currentTarget.checked + useLocalTime: event.currentTarget.checked, }) ) } @@ -64,7 +65,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableQueryHistory: event.currentTarget.checked + enableQueryHistory: event.currentTarget.checked, }) ) } @@ -75,7 +76,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableAutocomplete: event.currentTarget.checked + enableAutocomplete: event.currentTarget.checked, }) ) } @@ -86,7 +87,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableSyntaxHighlighting: event.currentTarget.checked + enableSyntaxHighlighting: event.currentTarget.checked, }) ) } @@ -97,13 +98,26 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - enableLinter: 
event.currentTarget.checked + enableLinter: event.currentTarget.checked, }) ) } />
    +
    + + dispatch( + updateSettings({ + showEmptyPools: event.currentTarget.checked, + }) + ) + } + /> +
    @@ -115,7 +129,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - showAnnotations: event.currentTarget.checked + showAnnotations: event.currentTarget.checked, }) ) } @@ -126,7 +140,7 @@ const SettingsMenu: FC = () => { onChange={(event) => dispatch( updateSettings({ - hideEmptyGroups: event.currentTarget.checked + hideEmptyGroups: event.currentTarget.checked, }) ) } @@ -146,7 +160,7 @@ const SettingsMenu: FC = () => { dispatch( updateSettings({ - alertGroupsPerPage: value + alertGroupsPerPage: value, }) ); }} @@ -165,7 +179,7 @@ const SettingsMenu: FC = () => { dispatch( updateSettings({ - ruleGroupsPerPage: value + ruleGroupsPerPage: value, }) ); }} diff --git a/web/ui/mantine-ui/src/pages/AlertsPage.tsx b/web/ui/mantine-ui/src/pages/AlertsPage.tsx index 33e170556d..69c4d7f130 100644 --- a/web/ui/mantine-ui/src/pages/AlertsPage.tsx +++ b/web/ui/mantine-ui/src/pages/AlertsPage.tsx @@ -11,7 +11,7 @@ import { Alert, TextInput, Anchor, - Pagination + Pagination, } from "@mantine/core"; import { useSuspenseAPIQuery } from "../api/api"; import { AlertingRule, AlertingRulesResult } from "../api/responseTypes/rules"; @@ -30,7 +30,7 @@ import { NumberParam, StringParam, useQueryParam, - withDefault + withDefault, } from "use-query-params"; import { useDebouncedValue } from "@mantine/hooks"; import { KVSearch } from "@nexucis/kvsearch"; @@ -67,7 +67,7 @@ type AlertsPageData = { const kvSearch = new KVSearch({ shouldSort: true, - indexedKeys: ["name", "labels", ["labels", /.*/]] + indexedKeys: ["name", "labels", ["labels", /.*/]], }); const buildAlertsPageData = ( @@ -79,9 +79,9 @@ const buildAlertsPageData = ( globalCounts: { inactive: 0, pending: 0, - firing: 0 + firing: 0, }, - groups: [] + groups: [], }; for (const group of data.groups) { @@ -89,7 +89,7 @@ const buildAlertsPageData = ( total: 0, inactive: 0, pending: 0, - firing: 0 + firing: 0, }; for (const r of group.rules) { @@ -126,9 +126,9 @@ const buildAlertsPageData = ( 
rule: r, counts: { firing: r.alerts.filter((a) => a.state === "firing").length, - pending: r.alerts.filter((a) => a.state === "pending").length - } - })) + pending: r.alerts.filter((a) => a.state === "pending").length, + }, + })), }); } @@ -146,8 +146,8 @@ export default function AlertsPage() { const { data } = useSuspenseAPIQuery({ path: `/rules`, params: { - type: "alert" - } + type: "alert", + }, }); const { hideEmptyGroups, showAnnotations } = useSettings(); @@ -255,7 +255,7 @@ export default function AlertsPage() { onClick={() => { dispatch( updateSettings({ - hideEmptyGroups: true + hideEmptyGroups: true, }) ); }} @@ -273,7 +273,7 @@ export default function AlertsPage() { onClick={() => { dispatch( updateSettings({ - hideEmptyGroups: true + hideEmptyGroups: true, }) ); }} @@ -295,8 +295,8 @@ export default function AlertsPage() { // have a different background color than their surrounding group card in dark mode, // but it would be better to use CSS to override the light/dark colors for // collapsed/expanded accordion items. 
- backgroundColor: "#c0c0c015" - } + backgroundColor: "#c0c0c015", + }, }} key={j} value={j.toString()} diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index be91b149bb..68c1db184b 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -30,6 +30,7 @@ import { setCollapsedPools, setShowLimitAlert, } from "../../state/targetsPageSlice"; +import { useSettings, updateSettings } from "../../state/settingsSlice"; import EndpointLink from "../../components/EndpointLink"; import CustomInfiniteScroll from "../../components/CustomInfiniteScroll"; @@ -38,7 +39,6 @@ import panelClasses from "../../Panel.module.css"; import TargetLabels from "./TargetLabels"; import { useDebouncedValue } from "@mantine/hooks"; import { targetPoolDisplayLimit } from "./TargetsPage"; -import { BooleanParam, useQueryParam, withDefault } from "use-query-params"; import { badgeIconStyle } from "../../styles"; type ScrapePool = { @@ -164,11 +164,8 @@ const ScrapePoolList: FC = ({ }, }); + const { showEmptyPools } = useSettings(); const dispatch = useAppDispatch(); - const [showEmptyPools, setShowEmptyPools] = useQueryParam( - "showEmptyPools", - withDefault(BooleanParam, true) - ); const { collapsedPools, showLimitAlert } = useAppSelector( (state) => state.targetsPage @@ -207,7 +204,11 @@ const ScrapePoolList: FC = ({ > Hiding {allPoolNames.length - shownPoolNames.length} empty pools due to filters or no targets. 
- setShowEmptyPools(true)}> + dispatch(updateSettings({ showEmptyPools: true }))} + > Show empty pools @@ -281,7 +282,9 @@ const ScrapePoolList: FC = ({ setShowEmptyPools(false)} + onClick={() => + dispatch(updateSettings({ showEmptyPools: false })) + } > Hide empty pools @@ -293,7 +296,9 @@ const ScrapePoolList: FC = ({ setShowEmptyPools(false)} + onClick={() => + dispatch(updateSettings({ showEmptyPools: false })) + } > Hide empty pools diff --git a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts index b3dc70662b..e27e858444 100644 --- a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts +++ b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts @@ -37,7 +37,7 @@ startAppListening({ effect: ({ payload }) => { persistToLocalStorage( localStorageKeyServiceDiscoveryPageCollapsedPools, - payload, + payload ); }, }); @@ -47,7 +47,7 @@ startAppListening({ effect: (_, { getState }) => { persistToLocalStorage( localStorageKeyQueryHistory, - getState().queryPage.queryHistory, + getState().queryPage.queryHistory ); }, }); @@ -65,6 +65,7 @@ startAppListening({ case "hideEmptyGroups": case "showAnnotations": case "ruleGroupsPerPage": + case "showEmptyPools": return persistToLocalStorage(`settings.${key}`, value); } }); diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts index da1c1546bb..06b4c06ca0 100644 --- a/web/ui/mantine-ui/src/state/settingsSlice.ts +++ b/web/ui/mantine-ui/src/state/settingsSlice.ts @@ -17,6 +17,7 @@ interface Settings { showAnnotations: boolean; ruleGroupsPerPage: number; alertGroupsPerPage: number; + showEmptyPools: boolean; } // Declared/defined in public/index.html, value replaced by Prometheus when serving bundle. 
@@ -35,6 +36,7 @@ export const localStorageKeyHideEmptyGroups = "settings.hideEmptyGroups"; export const localStorageKeyShowAnnotations = "settings.showAnnotations"; export const localStorageKeyRuleGroupsPerPage = "settings.ruleGroupsPerPage"; export const localStorageKeyAlertGroupsPerPage = "settings.alertGroupsPerPage"; +export const localStorageKeyShowEmptyPools = "settings.showEmptyPools"; // This dynamically/generically determines the pathPrefix by stripping the first known // endpoint suffix from the window location path. It works out of the box for both direct @@ -55,7 +57,7 @@ const getPathPrefix = (path: string) => { "/flags", "/config", "/alertmanager-discovery", - "/agent" + "/agent", ]; const pagePath = pagePaths.find((p) => path.endsWith(p)); @@ -112,7 +114,11 @@ export const initialState: Settings = { alertGroupsPerPage: initializeFromLocalStorage( localStorageKeyAlertGroupsPerPage, 10 - ) + ), + showEmptyPools: initializeFromLocalStorage( + localStorageKeyShowEmptyPools, + true + ), }; export const settingsSlice = createSlice({ @@ -121,8 +127,8 @@ export const settingsSlice = createSlice({ reducers: { updateSettings: (state, { payload }: PayloadAction>) => { Object.assign(state, payload); - } - } + }, + }, }); export const { updateSettings } = settingsSlice.actions; From 60b71108e5d4f9ebb763aaf3a30f86718b4323f2 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 13 Jan 2025 16:23:51 +0100 Subject: [PATCH 1220/1397] Merge two accidental "Alerts page settings" sections into one Signed-off-by: Julius Volz --- .../src/components/SettingsMenu.tsx | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index aae38909d0..e5376eef2c 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -107,36 +107,36 @@ const SettingsMenu: FC = () => {
    - - dispatch( - updateSettings({ - showAnnotations: event.currentTarget.checked, - }) - ) - } - /> -
    -
    - { - if (typeof value !== "number") { - return; + + + dispatch( + updateSettings({ + showAnnotations: event.currentTarget.checked, + }) + ) } + /> + { + if (typeof value !== "number") { + return; + } - dispatch( - updateSettings({ - alertGroupsPerPage: value, - }) - ); - }} - /> + dispatch( + updateSettings({ + alertGroupsPerPage: value, + }) + ); + }} + /> +
    Date: Mon, 13 Jan 2025 17:56:11 +0100 Subject: [PATCH 1221/1397] Remove settings from setting menu Signed-off-by: leonnicolas --- .../src/components/SettingsMenu.tsx | 50 ++++--------------- web/ui/mantine-ui/src/pages/AlertsPage.tsx | 43 ++++++---------- .../src/pages/targets/ScrapePoolsList.tsx | 21 +++----- .../src/state/localStorageMiddleware.ts | 2 - web/ui/mantine-ui/src/state/settingsSlice.ts | 12 ----- 5 files changed, 33 insertions(+), 95 deletions(-) diff --git a/web/ui/mantine-ui/src/components/SettingsMenu.tsx b/web/ui/mantine-ui/src/components/SettingsMenu.tsx index d7fe458ca5..aae38909d0 100644 --- a/web/ui/mantine-ui/src/components/SettingsMenu.tsx +++ b/web/ui/mantine-ui/src/components/SettingsMenu.tsx @@ -21,10 +21,8 @@ const SettingsMenu: FC = () => { enableSyntaxHighlighting, enableLinter, showAnnotations, - hideEmptyGroups, ruleGroupsPerPage, alertGroupsPerPage, - showEmptyPools, } = useSettings(); const dispatch = useAppDispatch(); @@ -105,47 +103,21 @@ const SettingsMenu: FC = () => { />
    -
    - - dispatch( - updateSettings({ - showEmptyPools: event.currentTarget.checked, - }) - ) - } - /> -
    - - - dispatch( - updateSettings({ - showAnnotations: event.currentTarget.checked, - }) - ) - } - /> - - dispatch( - updateSettings({ - hideEmptyGroups: event.currentTarget.checked, - }) - ) - } - /> - + + dispatch( + updateSettings({ + showAnnotations: event.currentTarget.checked, + }) + ) + } + />
    (searchFilter.trim(), 250); + const [showEmptyGroups, setShowEmptyGroups] = useLocalStorage({ + key: "alerts-page-show-empty-groups", + defaultValue: true, + }); const { alertGroupsPerPage } = useSettings(); const [activePage, setActivePage] = useQueryParam( @@ -178,10 +181,10 @@ export default function AlertsPage() { const shownGroups = useMemo( () => - !hideEmptyGroups + showEmptyGroups ? alertsPageData.groups : alertsPageData.groups.filter((g) => g.rules.length > 0), - [alertsPageData.groups, hideEmptyGroups] + [alertsPageData.groups, showEmptyGroups] ); // If we were e.g. on page 10 and the number of total pages decreases to 5 (due to filtering @@ -252,13 +255,7 @@ export default function AlertsPage() { { - dispatch( - updateSettings({ - hideEmptyGroups: true, - }) - ); - }} + onClick={() => setShowEmptyGroups(false)} > Hide empty groups @@ -270,13 +267,7 @@ export default function AlertsPage() { { - dispatch( - updateSettings({ - hideEmptyGroups: true, - }) - ); - }} + onClick={() => setShowEmptyGroups(false)} > Hide empty groups @@ -412,7 +403,7 @@ export default function AlertsPage() { )} )), - [currentPageGroups, showAnnotations, dispatch] + [currentPageGroups, showAnnotations, setShowEmptyGroups] ); return ( @@ -451,7 +442,7 @@ export default function AlertsPage() { No rules found. ) : ( - hideEmptyGroups && + !showEmptyGroups && alertsPageData.groups.length !== shownGroups.length && ( Hiding {alertsPageData.groups.length - shownGroups.length} empty groups due to filters or no rules. 
- - dispatch(updateSettings({ hideEmptyGroups: false })) - } - > + setShowEmptyGroups(true)}> Show empty groups diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index 68c1db184b..f674a1f4d3 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -25,12 +25,12 @@ import { humanizeDuration, now, } from "../../lib/formatTime"; +import { useLocalStorage } from "@mantine/hooks"; import { useAppDispatch, useAppSelector } from "../../state/hooks"; import { setCollapsedPools, setShowLimitAlert, } from "../../state/targetsPageSlice"; -import { useSettings, updateSettings } from "../../state/settingsSlice"; import EndpointLink from "../../components/EndpointLink"; import CustomInfiniteScroll from "../../components/CustomInfiniteScroll"; @@ -164,8 +164,11 @@ const ScrapePoolList: FC = ({ }, }); - const { showEmptyPools } = useSettings(); const dispatch = useAppDispatch(); + const [showEmptyPools, setShowEmptyPools] = useLocalStorage({ + key: "targets-page-show-empty-pools", + defaultValue: true, + }); const { collapsedPools, showLimitAlert } = useAppSelector( (state) => state.targetsPage @@ -204,11 +207,7 @@ const ScrapePoolList: FC = ({ > Hiding {allPoolNames.length - shownPoolNames.length} empty pools due to filters or no targets. 
- dispatch(updateSettings({ showEmptyPools: true }))} - > + setShowEmptyPools(true)}> Show empty pools @@ -282,9 +281,7 @@ const ScrapePoolList: FC = ({ - dispatch(updateSettings({ showEmptyPools: false })) - } + onClick={() => setShowEmptyPools(false)} > Hide empty pools @@ -296,9 +293,7 @@ const ScrapePoolList: FC = ({ - dispatch(updateSettings({ showEmptyPools: false })) - } + onClick={() => setShowEmptyPools(false)} > Hide empty pools diff --git a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts index e27e858444..79baa5ac64 100644 --- a/web/ui/mantine-ui/src/state/localStorageMiddleware.ts +++ b/web/ui/mantine-ui/src/state/localStorageMiddleware.ts @@ -62,10 +62,8 @@ startAppListening({ case "enableAutocomplete": case "enableSyntaxHighlighting": case "enableLinter": - case "hideEmptyGroups": case "showAnnotations": case "ruleGroupsPerPage": - case "showEmptyPools": return persistToLocalStorage(`settings.${key}`, value); } }); diff --git a/web/ui/mantine-ui/src/state/settingsSlice.ts b/web/ui/mantine-ui/src/state/settingsSlice.ts index 06b4c06ca0..c4154b7253 100644 --- a/web/ui/mantine-ui/src/state/settingsSlice.ts +++ b/web/ui/mantine-ui/src/state/settingsSlice.ts @@ -13,11 +13,9 @@ interface Settings { enableAutocomplete: boolean; enableSyntaxHighlighting: boolean; enableLinter: boolean; - hideEmptyGroups: boolean; showAnnotations: boolean; ruleGroupsPerPage: number; alertGroupsPerPage: number; - showEmptyPools: boolean; } // Declared/defined in public/index.html, value replaced by Prometheus when serving bundle. 
@@ -32,11 +30,9 @@ export const localStorageKeyEnableAutocomplete = "settings.enableAutocomplete"; export const localStorageKeyEnableSyntaxHighlighting = "settings.enableSyntaxHighlighting"; export const localStorageKeyEnableLinter = "settings.enableLinter"; -export const localStorageKeyHideEmptyGroups = "settings.hideEmptyGroups"; export const localStorageKeyShowAnnotations = "settings.showAnnotations"; export const localStorageKeyRuleGroupsPerPage = "settings.ruleGroupsPerPage"; export const localStorageKeyAlertGroupsPerPage = "settings.alertGroupsPerPage"; -export const localStorageKeyShowEmptyPools = "settings.showEmptyPools"; // This dynamically/generically determines the pathPrefix by stripping the first known // endpoint suffix from the window location path. It works out of the box for both direct @@ -99,10 +95,6 @@ export const initialState: Settings = { localStorageKeyEnableLinter, true ), - hideEmptyGroups: initializeFromLocalStorage( - localStorageKeyHideEmptyGroups, - false - ), showAnnotations: initializeFromLocalStorage( localStorageKeyShowAnnotations, true @@ -115,10 +107,6 @@ export const initialState: Settings = { localStorageKeyAlertGroupsPerPage, 10 ), - showEmptyPools: initializeFromLocalStorage( - localStorageKeyShowEmptyPools, - true - ), }; export const settingsSlice = createSlice({ From 7e27fd7417805c267a030dc12cde12174736b89b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:06:49 +0100 Subject: [PATCH 1222/1397] chore(deps-dev): bump vite from 6.0.3 to 6.0.7 in /web/ui (#15781) Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 6.0.3 to 6.0.7. 
- [Release notes](https://github.com/vitejs/vite/releases) - [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) - [Commits](https://github.com/vitejs/vite/commits/v6.0.7/packages/vite) --- updated-dependencies: - dependency-name: vite dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 253 ++++++++++++++++----------------- web/ui/package.json | 2 +- 3 files changed, 124 insertions(+), 133 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index c3f1e0fbfd..5f3d95a2e3 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -67,7 +67,7 @@ "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^6.0.3", + "vite": "^6.0.7", "vitest": "^2.1.8" } } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 6db3035c27..a29e4a3d02 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -19,7 +19,7 @@ "prettier": "^3.4.2", "ts-jest": "^29.2.2", "typescript": "^5.7.2", - "vite": "^6.0.3" + "vite": "^6.0.7" } }, "mantine-ui": { @@ -81,7 +81,7 @@ "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", "postcss-simple-vars": "^7.0.1", - "vite": "^6.0.3", + "vite": "^6.0.7", "vitest": "^2.1.8" } }, @@ -876,14 +876,13 @@ } }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.24.0.tgz", - "integrity": "sha512-WtKdFM7ls47zkKHFVzMz8opM7LkcsIp9amDUBIAWirg70RM71WRSjdILPsY5Uv1D42ZpUfaPILDlfactHgsRkw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.24.2.tgz", + "integrity": "sha512-thpVCb/rhxE/BnMLQ7GReQLLN8q9qbHmI55F4489/ByVg2aQaQ6kbcLb6FHkocZzQhxc4gx0sCk0tJkKBFzDhA==", 
"cpu": [ "ppc64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "aix" @@ -893,14 +892,13 @@ } }, "node_modules/@esbuild/android-arm": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.24.0.tgz", - "integrity": "sha512-arAtTPo76fJ/ICkXWetLCc9EwEHKaeya4vMrReVlEIUCAUncH7M4bhMQ+M9Vf+FFOZJdTNMXNBrWwW+OXWpSew==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.24.2.tgz", + "integrity": "sha512-tmwl4hJkCfNHwFB3nBa8z1Uy3ypZpxqxfTQOcHX+xRByyYgunVbZ9MzUUfb0RxaHIMnbHagwAxuTL+tnNM+1/Q==", "cpu": [ "arm" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "android" @@ -910,14 +908,13 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.24.0.tgz", - "integrity": "sha512-Vsm497xFM7tTIPYK9bNTYJyF/lsP590Qc1WxJdlB6ljCbdZKU9SY8i7+Iin4kyhV/KV5J2rOKsBQbB77Ab7L/w==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.24.2.tgz", + "integrity": "sha512-cNLgeqCqV8WxfcTIOeL4OAtSmL8JjcN6m09XIgro1Wi7cF4t/THaWEa7eL5CMoMBdjoHOTh/vwTO/o2TRXIyzg==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "android" @@ -927,14 +924,13 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.24.0.tgz", - "integrity": "sha512-t8GrvnFkiIY7pa7mMgJd7p8p8qqYIz1NYiAoKc75Zyv73L3DZW++oYMSHPRarcotTKuSs6m3hTOa5CKHaS02TQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.24.2.tgz", + "integrity": "sha512-B6Q0YQDqMx9D7rvIcsXfmJfvUYLoP722bgfBlO5cGvNVb5V/+Y7nhBE3mHV9OpxBf4eAS2S68KZztiPaWq4XYw==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "android" @@ -944,14 +940,13 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": 
"0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.24.0.tgz", - "integrity": "sha512-CKyDpRbK1hXwv79soeTJNHb5EiG6ct3efd/FTPdzOWdbZZfGhpbcqIpiD0+vwmpu0wTIL97ZRPZu8vUt46nBSw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.24.2.tgz", + "integrity": "sha512-kj3AnYWc+CekmZnS5IPu9D+HWtUI49hbnyqk0FLEJDbzCIQt7hg7ucF1SQAilhtYpIujfaHr6O0UHlzzSPdOeA==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "darwin" @@ -961,14 +956,13 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.24.0.tgz", - "integrity": "sha512-rgtz6flkVkh58od4PwTRqxbKH9cOjaXCMZgWD905JOzjFKW+7EiUObfd/Kav+A6Gyud6WZk9w+xu6QLytdi2OA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.24.2.tgz", + "integrity": "sha512-WeSrmwwHaPkNR5H3yYfowhZcbriGqooyu3zI/3GGpF8AyUdsrrP0X6KumITGA9WOyiJavnGZUwPGvxvwfWPHIA==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "darwin" @@ -978,14 +972,13 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.24.0.tgz", - "integrity": "sha512-6Mtdq5nHggwfDNLAHkPlyLBpE5L6hwsuXZX8XNmHno9JuL2+bg2BX5tRkwjyfn6sKbxZTq68suOjgWqCicvPXA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.24.2.tgz", + "integrity": "sha512-UN8HXjtJ0k/Mj6a9+5u6+2eZ2ERD7Edt1Q9IZiB5UZAIdPnVKDoG7mdTVGhHJIeEml60JteamR3qhsr1r8gXvg==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "freebsd" @@ -995,14 +988,13 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.24.0.tgz", - "integrity": 
"sha512-D3H+xh3/zphoX8ck4S2RxKR6gHlHDXXzOf6f/9dbFt/NRBDIE33+cVa49Kil4WUjxMGW0ZIYBYtaGCa2+OsQwQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.24.2.tgz", + "integrity": "sha512-TvW7wE/89PYW+IevEJXZ5sF6gJRDY/14hyIGFXdIucxCsbRmLUcjseQu1SyTko+2idmCw94TgyaEZi9HUSOe3Q==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "freebsd" @@ -1012,14 +1004,13 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.24.0.tgz", - "integrity": "sha512-gJKIi2IjRo5G6Glxb8d3DzYXlxdEj2NlkixPsqePSZMhLudqPhtZ4BUrpIuTjJYXxvF9njql+vRjB2oaC9XpBw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.24.2.tgz", + "integrity": "sha512-n0WRM/gWIdU29J57hJyUdIsk0WarGd6To0s+Y+LwvlC55wt+GT/OgkwoXCXvIue1i1sSNWblHEig00GBWiJgfA==", "cpu": [ "arm" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1029,14 +1020,13 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.24.0.tgz", - "integrity": "sha512-TDijPXTOeE3eaMkRYpcy3LarIg13dS9wWHRdwYRnzlwlA370rNdZqbcp0WTyyV/k2zSxfko52+C7jU5F9Tfj1g==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.24.2.tgz", + "integrity": "sha512-7HnAD6074BW43YvvUmE/35Id9/NB7BeX5EoNkK9obndmZBUk8xmJJeU7DwmUeN7tkysslb2eSl6CTrYz6oEMQg==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1046,14 +1036,13 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.24.0.tgz", - "integrity": "sha512-K40ip1LAcA0byL05TbCQ4yJ4swvnbzHscRmUilrmP9Am7//0UjPreh4lpYzvThT2Quw66MhjG//20mrufm40mA==", + "version": "0.24.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.24.2.tgz", + "integrity": "sha512-sfv0tGPQhcZOgTKO3oBE9xpHuUqguHvSo4jl+wjnKwFpapx+vUDcawbwPNuBIAYdRAvIDBfZVvXprIj3HA+Ugw==", "cpu": [ "ia32" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1063,14 +1052,13 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.24.0.tgz", - "integrity": "sha512-0mswrYP/9ai+CU0BzBfPMZ8RVm3RGAN/lmOMgW4aFUSOQBjA31UP8Mr6DDhWSuMwj7jaWOT0p0WoZ6jeHhrD7g==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.24.2.tgz", + "integrity": "sha512-CN9AZr8kEndGooS35ntToZLTQLHEjtVB5n7dl8ZcTZMonJ7CCfStrYhrzF97eAecqVbVJ7APOEe18RPI4KLhwQ==", "cpu": [ "loong64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1080,14 +1068,13 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.24.0.tgz", - "integrity": "sha512-hIKvXm0/3w/5+RDtCJeXqMZGkI2s4oMUGj3/jM0QzhgIASWrGO5/RlzAzm5nNh/awHE0A19h/CvHQe6FaBNrRA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.24.2.tgz", + "integrity": "sha512-iMkk7qr/wl3exJATwkISxI7kTcmHKE+BlymIAbHO8xanq/TjHaaVThFF6ipWzPHryoFsesNQJPE/3wFJw4+huw==", "cpu": [ "mips64el" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1097,14 +1084,13 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.24.0.tgz", - "integrity": "sha512-HcZh5BNq0aC52UoocJxaKORfFODWXZxtBaaZNuN3PUX3MoDsChsZqopzi5UupRhPHSEHotoiptqikjN/B77mYQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.24.2.tgz", + "integrity": 
"sha512-shsVrgCZ57Vr2L8mm39kO5PPIb+843FStGt7sGGoqiiWYconSxwTiuswC1VJZLCjNiMLAMh34jg4VSEQb+iEbw==", "cpu": [ "ppc64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1114,14 +1100,13 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.24.0.tgz", - "integrity": "sha512-bEh7dMn/h3QxeR2KTy1DUszQjUrIHPZKyO6aN1X4BCnhfYhuQqedHaa5MxSQA/06j3GpiIlFGSsy1c7Gf9padw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.24.2.tgz", + "integrity": "sha512-4eSFWnU9Hhd68fW16GD0TINewo1L6dRrB+oLNNbYyMUAeOD2yCK5KXGK1GH4qD/kT+bTEXjsyTCiJGHPZ3eM9Q==", "cpu": [ "riscv64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1131,14 +1116,13 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.24.0.tgz", - "integrity": "sha512-ZcQ6+qRkw1UcZGPyrCiHHkmBaj9SiCD8Oqd556HldP+QlpUIe2Wgn3ehQGVoPOvZvtHm8HPx+bH20c9pvbkX3g==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.24.2.tgz", + "integrity": "sha512-S0Bh0A53b0YHL2XEXC20bHLuGMOhFDO6GN4b3YjRLK//Ep3ql3erpNcPlEFed93hsQAjAQDNsvcK+hV90FubSw==", "cpu": [ "s390x" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "linux" @@ -1148,14 +1132,13 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.24.0.tgz", - "integrity": "sha512-vbutsFqQ+foy3wSSbmjBXXIJ6PL3scghJoM8zCL142cGaZKAdCZHyf+Bpu/MmX9zT9Q0zFBVKb36Ma5Fzfa8xA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.24.2.tgz", + "integrity": "sha512-8Qi4nQcCTbLnK9WoMjdC9NiTG6/E38RNICU6sUNqK0QFxCYgoARqVqxdFmWkdonVsvGqWhmm7MO0jyTqLqwj0Q==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, 
"os": [ "linux" @@ -1164,15 +1147,30 @@ "node": ">=18" } }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.24.2.tgz", + "integrity": "sha512-wuLK/VztRRpMt9zyHSazyCVdCXlpHkKm34WUyinD2lzK07FAHTq0KQvZZlXikNWkDGoT6x3TD51jKQ7gMVpopw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.24.0.tgz", - "integrity": "sha512-hjQ0R/ulkO8fCYFsG0FZoH+pWgTTDreqpqY7UnQntnaKv95uP5iW3+dChxnx7C3trQQU40S+OgWhUVwCjVFLvg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.24.2.tgz", + "integrity": "sha512-VefFaQUc4FMmJuAxmIHgUmfNiLXY438XrL4GDNV1Y1H/RW3qow68xTwjZKfj/+Plp9NANmzbH5R40Meudu8mmw==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "netbsd" @@ -1182,14 +1180,13 @@ } }, "node_modules/@esbuild/openbsd-arm64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.24.0.tgz", - "integrity": "sha512-MD9uzzkPQbYehwcN583yx3Tu5M8EIoTD+tUgKF982WYL9Pf5rKy9ltgD0eUgs8pvKnmizxjXZyLt0z6DC3rRXg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.24.2.tgz", + "integrity": "sha512-YQbi46SBct6iKnszhSvdluqDmxCJA+Pu280Av9WICNwQmMxV7nLRHZfjQzwbPs3jeWnuAhE9Jy0NrnJ12Oz+0A==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "openbsd" @@ -1199,14 +1196,13 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.24.0.tgz", - "integrity": "sha512-4ir0aY1NGUhIC1hdoCzr1+5b43mw99uNwVzhIq1OY3QcEwPDO3B7WNXBzaKY5Nsf1+N11i1eOfFcq+D/gOS15Q==", + "version": "0.24.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.24.2.tgz", + "integrity": "sha512-+iDS6zpNM6EnJyWv0bMGLWSWeXGN/HTaF/LXHXHwejGsVi+ooqDfMCCTerNFxEkM3wYVcExkeGXNqshc9iMaOA==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "openbsd" @@ -1216,14 +1212,13 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.24.0.tgz", - "integrity": "sha512-jVzdzsbM5xrotH+W5f1s+JtUy1UWgjU0Cf4wMvffTB8m6wP5/kx0KiaLHlbJO+dMgtxKV8RQ/JvtlFcdZ1zCPA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.24.2.tgz", + "integrity": "sha512-hTdsW27jcktEvpwNHJU4ZwWFGkz2zRJUz8pvddmXPtXDzVKTTINmlmga3ZzwcuMpUvLw7JkLy9QLKyGpD2Yxig==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "sunos" @@ -1233,14 +1228,13 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.24.0.tgz", - "integrity": "sha512-iKc8GAslzRpBytO2/aN3d2yb2z8XTVfNV0PjGlCxKo5SgWmNXx82I/Q3aG1tFfS+A2igVCY97TJ8tnYwpUWLCA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.24.2.tgz", + "integrity": "sha512-LihEQ2BBKVFLOC9ZItT9iFprsE9tqjDjnbulhHoFxYQtQfai7qfluVODIYxt1PgdoyQkz23+01rzwNwYfutxUQ==", "cpu": [ "arm64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "win32" @@ -1250,14 +1244,13 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.24.0.tgz", - "integrity": "sha512-vQW36KZolfIudCcTnaTpmLQ24Ha1RjygBo39/aLkM2kmjkWmZGEJ5Gn9l5/7tzXA42QGIoWbICfg6KLLkIw6yw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.24.2.tgz", + "integrity": "sha512-q+iGUwfs8tncmFC9pcnD5IvRHAzmbwQ3GPS5/ceCyHdjXubwQWI12MKWSNSMYLJMq23/IUCvJMS76PDqXe1fxA==", 
"cpu": [ "ia32" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "win32" @@ -1267,14 +1260,13 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.24.0.tgz", - "integrity": "sha512-7IAFPrjSQIJrGsK6flwg7NFmwBoSTyF3rl7If0hNUFQU4ilTsEPL6GuMuU9BfIWVVGuRnuIidkSMC+c0Otu8IA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.24.2.tgz", + "integrity": "sha512-7VTgWzgMGvup6aSqDPLiW5zHaxYJGTO4OokMjIlrCtf+VpEL+cXKtCvg723iguPYI5oaUNdS+/V7OU2gvXVWEg==", "cpu": [ "x64" ], "dev": true, - "license": "MIT", "optional": true, "os": [ "win32" @@ -4474,12 +4466,11 @@ "license": "MIT" }, "node_modules/esbuild": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.24.0.tgz", - "integrity": "sha512-FuLPevChGDshgSicjisSooU0cemp/sGXR841D5LHMB7mTVOmsEHcAxaH3irL53+8YDIeVNQEySh4DaYU/iuPqQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.24.2.tgz", + "integrity": "sha512-+9egpBW8I3CD5XPe0n6BfT5fxLzxrlDzqydF3aviG+9ni1lDC/OvMHcxqEFV0+LANZG5R1bFMWfUrjVsdwxJvA==", "dev": true, "hasInstallScript": true, - "license": "MIT", "bin": { "esbuild": "bin/esbuild" }, @@ -4487,30 +4478,31 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.24.0", - "@esbuild/android-arm": "0.24.0", - "@esbuild/android-arm64": "0.24.0", - "@esbuild/android-x64": "0.24.0", - "@esbuild/darwin-arm64": "0.24.0", - "@esbuild/darwin-x64": "0.24.0", - "@esbuild/freebsd-arm64": "0.24.0", - "@esbuild/freebsd-x64": "0.24.0", - "@esbuild/linux-arm": "0.24.0", - "@esbuild/linux-arm64": "0.24.0", - "@esbuild/linux-ia32": "0.24.0", - "@esbuild/linux-loong64": "0.24.0", - "@esbuild/linux-mips64el": "0.24.0", - "@esbuild/linux-ppc64": "0.24.0", - "@esbuild/linux-riscv64": "0.24.0", - "@esbuild/linux-s390x": "0.24.0", - "@esbuild/linux-x64": "0.24.0", - "@esbuild/netbsd-x64": "0.24.0", - 
"@esbuild/openbsd-arm64": "0.24.0", - "@esbuild/openbsd-x64": "0.24.0", - "@esbuild/sunos-x64": "0.24.0", - "@esbuild/win32-arm64": "0.24.0", - "@esbuild/win32-ia32": "0.24.0", - "@esbuild/win32-x64": "0.24.0" + "@esbuild/aix-ppc64": "0.24.2", + "@esbuild/android-arm": "0.24.2", + "@esbuild/android-arm64": "0.24.2", + "@esbuild/android-x64": "0.24.2", + "@esbuild/darwin-arm64": "0.24.2", + "@esbuild/darwin-x64": "0.24.2", + "@esbuild/freebsd-arm64": "0.24.2", + "@esbuild/freebsd-x64": "0.24.2", + "@esbuild/linux-arm": "0.24.2", + "@esbuild/linux-arm64": "0.24.2", + "@esbuild/linux-ia32": "0.24.2", + "@esbuild/linux-loong64": "0.24.2", + "@esbuild/linux-mips64el": "0.24.2", + "@esbuild/linux-ppc64": "0.24.2", + "@esbuild/linux-riscv64": "0.24.2", + "@esbuild/linux-s390x": "0.24.2", + "@esbuild/linux-x64": "0.24.2", + "@esbuild/netbsd-arm64": "0.24.2", + "@esbuild/netbsd-x64": "0.24.2", + "@esbuild/openbsd-arm64": "0.24.2", + "@esbuild/openbsd-x64": "0.24.2", + "@esbuild/sunos-x64": "0.24.2", + "@esbuild/win32-arm64": "0.24.2", + "@esbuild/win32-ia32": "0.24.2", + "@esbuild/win32-x64": "0.24.2" } }, "node_modules/escalade": { @@ -9164,13 +9156,12 @@ } }, "node_modules/vite": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.3.tgz", - "integrity": "sha512-Cmuo5P0ENTN6HxLSo6IHsjCLn/81Vgrp81oaiFFMRa8gGDj5xEjIcEpf2ZymZtZR8oU0P2JX5WuUp/rlXcHkAw==", + "version": "6.0.7", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.7.tgz", + "integrity": "sha512-RDt8r/7qx9940f8FcOIAH9PTViRrghKaK2K1jY3RaAURrEUbm9Du1mJ72G+jlhtG3WwodnfzY8ORQZbBavZEAQ==", "dev": true, - "license": "MIT", "dependencies": { - "esbuild": "^0.24.0", + "esbuild": "^0.24.2", "postcss": "^8.4.49", "rollup": "^4.23.0" }, diff --git a/web/ui/package.json b/web/ui/package.json index 62b3f26246..10ba19538f 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -22,6 +22,6 @@ "prettier": "^3.4.2", "ts-jest": "^29.2.2", "typescript": "^5.7.2", - "vite": "^6.0.3" + "vite": 
"^6.0.7" } } From 1542e73b663be8507253dec920437647107de5fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:07:29 +0100 Subject: [PATCH 1223/1397] chore(deps): bump github.com/prometheus/sigv4 from 0.1.0 to 0.1.1 (#15764) Bumps [github.com/prometheus/sigv4](https://github.com/prometheus/sigv4) from 0.1.0 to 0.1.1. - [Release notes](https://github.com/prometheus/sigv4/releases) - [Changelog](https://github.com/prometheus/sigv4/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/sigv4/compare/v0.1.0...v0.1.1) --- updated-dependencies: - dependency-name: github.com/prometheus/sigv4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 01af5f1c8b..941e6439d4 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/prometheus/common v0.61.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/exporter-toolkit v0.13.2 - github.com/prometheus/sigv4 v0.1.0 + github.com/prometheus/sigv4 v0.1.1 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 diff --git a/go.sum b/go.sum index 72abefc4c3..bc4fa905c8 100644 --- a/go.sum +++ b/go.sum @@ -437,8 +437,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/sigv4 v0.1.0 h1:FgxH+m1qf9dGQ4w8Dd6VkthmpFQfGTzUeavMoQeG1LA= -github.com/prometheus/sigv4 
v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU= +github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q= +github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8= github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= From 042cea1e18ce6332a88a3146e483292bc5212420 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:08:03 +0100 Subject: [PATCH 1224/1397] chore(deps): bump google.golang.org/protobuf from 1.36.0 to 1.36.1 (#15767) Bumps google.golang.org/protobuf from 1.36.0 to 1.36.1. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 941e6439d4..8b1f8a1c83 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( google.golang.org/api v0.213.0 google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 google.golang.org/grpc v1.69.0 - google.golang.org/protobuf v1.36.0 + google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.3 diff --git a/go.sum b/go.sum index bc4fa905c8..fa2eb22242 100644 --- a/go.sum +++ b/go.sum @@ -650,8 +650,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= google.golang.org/grpc v1.69.0/go.mod 
h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= -google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 337c7394181fe975c8359f7904b5f6fdec6c52fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:11:53 +0100 Subject: [PATCH 1225/1397] chore(deps): bump github.com/linode/linodego from 1.43.0 to 1.44.0 (#15770) Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.43.0 to 1.44.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.43.0...v1.44.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8b1f8a1c83..6ae5ba9675 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.43.0 + github.com/linode/linodego v1.44.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -127,7 +127,7 @@ require ( github.com/go-openapi/spec v0.20.14 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.23.0 // indirect - github.com/go-resty/resty/v2 v2.15.3 // indirect + github.com/go-resty/resty/v2 v2.16.2 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/glog v1.2.2 // indirect diff --git a/go.sum b/go.sum index fa2eb22242..b264521043 100644 --- a/go.sum +++ b/go.sum @@ -160,8 +160,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= -github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8= -github.com/go-resty/resty/v2 v2.15.3/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU= +github.com/go-resty/resty/v2 v2.16.2 h1:CpRqTjIzq/rweXUt9+GxzzQdlkqMdt8Lm/fuK/CAbAg= +github.com/go-resty/resty/v2 v2.16.2/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -317,8 +317,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.43.0 h1:sGeBB3caZt7vKBoPS5p4AVzmlG4JoqQOdigIibx3egk= -github.com/linode/linodego v1.43.0/go.mod h1:n4TMFu1UVNala+icHqrTEFFaicYSF74cSAUG5zkTwfA= +github.com/linode/linodego v1.44.0 h1:JZLLWzCAx3CmHSV9NmCoXisuqKtrmPhfY9MrgvaHMUY= +github.com/linode/linodego v1.44.0/go.mod h1:umdoNOmtbqAdGQbmQnPFZ2YS4US+/mU/1bA7MjoKAvg= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= From 3351e79a504724782168e61cc7ba84985670ddf1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:12:25 +0100 Subject: [PATCH 1226/1397] chore(deps-dev): bump @types/node in /web/ui/react-app (#15768) Bumps [@types/node](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node) from 22.10.2 to 22.10.3. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/node) --- updated-dependencies: - dependency-name: "@types/node" dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 ++++----- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 6dec476163..f06b873077 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -53,7 +53,7 @@ "@types/flot": "0.0.36", "@types/jest": "^29.5.14", "@types/jquery": "^3.5.32", - "@types/node": "^22.10.2", + "@types/node": "^22.10.3", "@types/react": "^17.0.71", "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-dom": "^17.0.25", @@ -5403,10 +5403,9 @@ "dev": true }, "node_modules/@types/node": { - "version": "22.10.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.2.tgz", - "integrity": "sha512-Xxr6BBRCAOQixvonOye19wnzyDiUtTeqldOOmj3CkeblonbccA12PFwlufvRdrpjXxqnmUaeiU5EOA+7s5diUQ==", - "license": "MIT", + "version": "22.10.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.3.tgz", + "integrity": "sha512-DifAyw4BkrufCILvD3ucnuN8eydUfc/C1GlyrnI+LK6543w5/L3VeVgf05o3B4fqSXP1dKYLOZsKfutpxPzZrw==", "dependencies": { "undici-types": "~6.20.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index a801be62d7..685db2592b 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -70,7 +70,7 @@ "@types/flot": "0.0.36", "@types/jest": "^29.5.14", "@types/jquery": "^3.5.32", - "@types/node": "^22.10.2", + "@types/node": "^22.10.3", "@types/react": "^17.0.71", "@types/react-copy-to-clipboard": "^5.0.7", "@types/react-dom": "^17.0.25", From 2a5cd8179dfa2e6871f314d423f5ba1bd4663ace Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:13:12 +0100 Subject: [PATCH 1227/1397] chore(deps): bump github/codeql-action from 3.27.7 to 3.28.0 
(#15759) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.7 to 3.28.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/babb554ede22fd5605947329c4d04d8e7a0b8155...48ab28a6f5dbc2a99bf1e0131198dd8f1df78169) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 56839da9cb..9a522fb6c9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7 + uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7 + uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7 + uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index edb347abea..212c2cd617 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to 
GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@babb554ede22fd5605947329c4d04d8e7a0b8155 # tag=v3.27.7 + uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # tag=v3.28.0 with: sarif_file: results.sarif From ebb3c4e35f705f19aeeb2646cc81ba3328079667 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:13:29 +0100 Subject: [PATCH 1228/1397] chore(deps): bump actions/upload-artifact from 4.4.3 to 4.5.0 (#15757) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.4.3 to 4.5.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882...6f51ac03b9356f520e9adb1b1b7802705f340c2b) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/fuzzing.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 5f1b0f25ce..9f5ef96296 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 212c2cd617..1415fead8e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3 + uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # tag=v4.5.0 with: name: SARIF file path: results.sarif From cb0dc8bc555a1d10a1ece8175b4aa85b42437b11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:13:47 +0100 Subject: [PATCH 1229/1397] chore(deps): bump bufbuild/buf-setup-action from 1.47.2 to 1.48.0 (#15756) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.47.2 to 1.48.0. 
- [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/9672cee01808979ea1249f81d6d321217b9a10f6...1115d0acd3d2a120b30023fac52abc46807c8fd6) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index d03f190769..31e4d7ee2a 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2 + - uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index bf8ae3f6a4..b72b927bb0 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2 + - uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From f5976c0afb4f4aa5d3e878d05b43edcbea527198 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:14:43 +0100 Subject: [PATCH 1230/1397] chore(deps): bump sass from 1.82.0 to 1.83.0 in /web/ui/react-app (#15762) Bumps [sass](https://github.com/sass/dart-sass) from 1.82.0 to 1.83.0. - [Release notes](https://github.com/sass/dart-sass/releases) - [Changelog](https://github.com/sass/dart-sass/blob/main/CHANGELOG.md) - [Commits](https://github.com/sass/dart-sass/compare/1.82.0...1.83.0) --- updated-dependencies: - dependency-name: sass dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 ++++----- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index f06b873077..ece343d719 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -43,7 +43,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.13.1", - "sass": "1.82.0", + "sass": "1.83.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, @@ -21202,10 +21202,9 @@ "dev": true }, "node_modules/sass": { - "version": "1.82.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.82.0.tgz", - "integrity": "sha512-j4GMCTa8elGyN9A7x7bEglx0VgSpNUG4W4wNedQ33wSMdnkqQCT8HTwOaVSV4e6yQovcu/3Oc4coJP/l0xhL2Q==", - "license": "MIT", + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.83.0.tgz", + "integrity": "sha512-qsSxlayzoOjdvXMVLkzF84DJFc2HZEL/rFyGIKbbilYtAvlCxyuzUeff9LawTn4btVnLKg75Z8MMr1lxU1lfGw==", "dependencies": { "chokidar": "^4.0.0", "immutable": "^5.0.2", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 685db2592b..4e699abfb9 100644 --- a/web/ui/react-app/package.json +++ 
b/web/ui/react-app/package.json @@ -38,7 +38,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.13.1", - "sass": "1.82.0", + "sass": "1.83.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, From 0bc95c388f4b6c7d083611a02bbfe027af7100a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:46:39 +0100 Subject: [PATCH 1231/1397] chore(deps): bump google.golang.org/api from 0.213.0 to 0.216.0 (#15812) Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.213.0 to 0.216.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.213.0...v0.216.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 6ae5ba9675..ee88a0e5f9 100644 --- a/go.mod +++ b/go.mod @@ -74,14 +74,14 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/oauth2 v0.24.0 + golang.org/x/oauth2 v0.25.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.28.0 golang.org/x/text v0.21.0 golang.org/x/tools v0.28.0 - google.golang.org/api v0.213.0 + google.golang.org/api v0.216.0 google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 - google.golang.org/grpc v1.69.0 + google.golang.org/grpc v1.69.2 google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -137,7 +137,7 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect - github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect @@ -189,10 +189,10 @@ require ( golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.32.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/term v0.27.0 // indirect - golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + golang.org/x/time v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect 
gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index b264521043..7ab4986307 100644 --- a/go.sum +++ b/go.sum @@ -213,8 +213,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= -github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -558,12 +558,12 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -614,8 +614,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -632,8 +632,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ= -google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ= +google.golang.org/api v0.216.0 h1:xnEHy+xWFrtYInWPy8OdGFsyIfWJjtVnO39g7pz2BFY= +google.golang.org/api v0.216.0/go.mod h1:K9wzQMvWi47Z9IU7OgdOofvZuw75Ge3PPITImZR/UyI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -641,15 +641,15 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= -google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 16420e859f38005c6f3c37428f4991780bc11e3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:54:17 +0100 Subject: [PATCH 1232/1397] chore(deps): bump @codemirror/view in /web/ui/react-app (#15761) Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.35.3 to 6.36.1. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.35.3...6.36.1) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 ++++----- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index ece343d719..0714720bb8 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -14,7 +14,7 @@ "@codemirror/lint": "^6.8.4", "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.35.3", + "@codemirror/view": "^6.36.1", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", @@ -2312,10 +2312,9 @@ } }, "node_modules/@codemirror/view": { - "version": "6.35.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.35.3.tgz", - "integrity": "sha512-ScY7L8+EGdPl4QtoBiOzE4FELp7JmNUsBvgBcCakXWM2uiv/K89VAzU3BMDscf0DsACLvTKePbd5+cFDTcei6g==", - "license": "MIT", + "version": "6.36.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.36.1.tgz", + "integrity": "sha512-miD1nyT4m4uopZaDdO2uXU/LLHliKNYL9kB1C1wJHrunHLm/rpkb5QVSokqgw9hFqEZakrdlb/VGWX8aYZTslQ==", "dependencies": { "@codemirror/state": "^6.5.0", "style-mod": "^4.1.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 4e699abfb9..1ccbe38f67 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -9,7 +9,7 @@ "@codemirror/lint": "^6.8.4", "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.35.3", + "@codemirror/view": "^6.36.1", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", From 2c2a1ac66d847cc4f5f3f35fd104aa52dfc0e58b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 
13 Jan 2025 19:55:02 +0100 Subject: [PATCH 1233/1397] chore(deps-dev): bump @eslint/js from 9.16.0 to 9.17.0 in /web/ui (#15753) Bumps [@eslint/js](https://github.com/eslint/eslint/tree/HEAD/packages/js) from 9.16.0 to 9.17.0. - [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/commits/v9.17.0/packages/js) --- updated-dependencies: - dependency-name: "@eslint/js" dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 5f3d95a2e3..89cb23f453 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -53,7 +53,7 @@ "devDependencies": { "@eslint/compat": "^1.2.4", "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "^9.16.0", + "@eslint/js": "^9.17.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index a29e4a3d02..c1081e4a62 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -67,7 +67,7 @@ "devDependencies": { "@eslint/compat": "^1.2.4", "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "^9.16.0", + "@eslint/js": "^9.17.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", "@typescript-eslint/eslint-plugin": "^6.21.0", @@ -145,6 +145,15 @@ } } }, + "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { + "version": "9.16.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", + "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", + "dev": 
true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.301.0", @@ -1396,9 +1405,9 @@ } }, "node_modules/@eslint/js": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", - "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", + "version": "9.17.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.17.0.tgz", + "integrity": "sha512-Sxc4hqcs1kTu0iID3kcZDW3JHq2a77HO9P8CP6YEA/FpH3Ll8UXE2r/86Rz9YJLKme39S9vU5OWNjC6Xl0Cr3w==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" From a59f23f55b2ef3741ed31589c91da8aa436d48ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:55:49 +0100 Subject: [PATCH 1234/1397] chore(deps): bump @uiw/react-codemirror from 4.23.6 to 4.23.7 in /web/ui (#15752) Bumps [@uiw/react-codemirror](https://github.com/uiwjs/react-codemirror) from 4.23.6 to 4.23.7. - [Release notes](https://github.com/uiwjs/react-codemirror/releases) - [Commits](https://github.com/uiwjs/react-codemirror/compare/v4.23.6...v4.23.7) --- updated-dependencies: - dependency-name: "@uiw/react-codemirror" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 89cb23f453..d53c1fc841 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -36,7 +36,7 @@ "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.6", + "@uiw/react-codemirror": "^4.23.7", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c1081e4a62..4c92167653 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -50,7 +50,7 @@ "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", "@types/sanitize-html": "^2.13.0", - "@uiw/react-codemirror": "^4.23.6", + "@uiw/react-codemirror": "^4.23.7", "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", @@ -3270,9 +3270,9 @@ } }, "node_modules/@uiw/codemirror-extensions-basic-setup": { - "version": "4.23.6", - "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.6.tgz", - "integrity": "sha512-bvtq8IOvdkLJMhoJBRGPEzU51fMpPDwEhcAHp9xCR05MtbIokQgsnLXrmD1aZm6e7s/3q47H+qdSfAAkR5MkLA==", + "version": "4.23.7", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.23.7.tgz", + "integrity": "sha512-9/2EUa1Lck4kFKkR2BkxlZPpgD/EWuKHnOlysf1yHKZGraaZmZEaUw+utDK4QcuJc8Iz097vsLz4f4th5EU27g==", "dependencies": { "@codemirror/autocomplete": "^6.0.0", "@codemirror/commands": "^6.0.0", @@ -3296,15 +3296,15 @@ } }, "node_modules/@uiw/react-codemirror": { - "version": "4.23.6", - "resolved": 
"https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.6.tgz", - "integrity": "sha512-caYKGV6TfGLRV1HHD3p0G3FiVzKL1go7wes5XT2nWjB0+dTdyzyb81MKRSacptgZcotujfNO6QXn65uhETRAMw==", + "version": "4.23.7", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.23.7.tgz", + "integrity": "sha512-Nh/0P6W+kWta+ARp9YpnKPD9ick5teEnwmtNoPQnyd6NPv0EQP3Ui4YmRVNj1nkUEo+QjrAUaEfcejJ2up/HZA==", "dependencies": { "@babel/runtime": "^7.18.6", "@codemirror/commands": "^6.1.0", "@codemirror/state": "^6.1.1", "@codemirror/theme-one-dark": "^6.0.0", - "@uiw/codemirror-extensions-basic-setup": "4.23.6", + "@uiw/codemirror-extensions-basic-setup": "4.23.7", "codemirror": "^6.0.0" }, "funding": { From 23fe8bc1a304f1e585426cad0c44a6a4b9aa656c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:56:14 +0100 Subject: [PATCH 1235/1397] chore(deps-dev): bump globals from 15.13.0 to 15.14.0 in /web/ui (#15751) Bumps [globals](https://github.com/sindresorhus/globals) from 15.13.0 to 15.14.0. - [Release notes](https://github.com/sindresorhus/globals/releases) - [Commits](https://github.com/sindresorhus/globals/compare/v15.13.0...v15.14.0) --- updated-dependencies: - dependency-name: globals dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index d53c1fc841..e0200088e6 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -62,7 +62,7 @@ "eslint": "^9.16.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.16", - "globals": "^15.13.0", + "globals": "^15.14.0", "jsdom": "^25.0.1", "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 4c92167653..98c4c10984 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -76,7 +76,7 @@ "eslint": "^9.16.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.16", - "globals": "^15.13.0", + "globals": "^15.14.0", "jsdom": "^25.0.1", "postcss": "^8.4.47", "postcss-preset-mantine": "^1.17.0", @@ -5304,9 +5304,9 @@ } }, "node_modules/globals": { - "version": "15.13.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-15.13.0.tgz", - "integrity": "sha512-49TewVEz0UxZjr1WYYsWpPrhyC/B/pA8Bq0fUmet2n+eR7yn0IvNzNaoBwnK6mdkzcN+se7Ez9zUgULTz2QH4g==", + "version": "15.14.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-15.14.0.tgz", + "integrity": "sha512-OkToC372DtlQeje9/zHIo5CT8lRP/FUgEOKBEhU4e0abL7J7CD24fD9ohiLN5hagG/kWCYj4K5oaxxtj2Z0Dig==", "dev": true, "engines": { "node": ">=18" From e24375f1b33b92d1d2a1ca6b1fa84ae85b996528 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:56:42 +0100 Subject: [PATCH 1236/1397] chore(deps): bump @codemirror/language in /web/ui/react-app (#15754) Bumps [@codemirror/language](https://github.com/codemirror/language) from 6.10.6 to 6.10.8. 
- [Changelog](https://github.com/codemirror/language/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/language/compare/6.10.6...6.10.8) --- updated-dependencies: - dependency-name: "@codemirror/language" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 ++++----- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 0714720bb8..a74effa39c 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -10,7 +10,7 @@ "dependencies": { "@codemirror/autocomplete": "^6.18.3", "@codemirror/commands": "^6.7.1", - "@codemirror/language": "^6.10.6", + "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", @@ -2267,10 +2267,9 @@ } }, "node_modules/@codemirror/language": { - "version": "6.10.6", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.6.tgz", - "integrity": "sha512-KrsbdCnxEztLVbB5PycWXFxas4EOyk/fPAfruSOnDDppevQgid2XZ+KbJ9u+fDikP/e7MW7HPBTvTb8JlZK9vA==", - "license": "MIT", + "version": "6.10.8", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.8.tgz", + "integrity": "sha512-wcP8XPPhDH2vTqf181U8MbZnW+tDyPYy0UzVOa+oHORjyT+mhhom9vBd7dApJwoDz9Nb/a8kHjJIsuA/t8vNFw==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.23.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 1ccbe38f67..90fb4fb502 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -5,7 +5,7 @@ "dependencies": { "@codemirror/autocomplete": "^6.18.3", "@codemirror/commands": "^6.7.1", - "@codemirror/language": "^6.10.6", + 
"@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", "@codemirror/search": "^6.5.8", "@codemirror/state": "^6.3.3", From 938bb65518c9f9afd0604ce3e6db7eafb0bfa90f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:57:14 +0100 Subject: [PATCH 1237/1397] chore(deps): bump @mantine/code-highlight in /web/ui (#15749) Bumps [@mantine/code-highlight](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/code-highlight) from 7.15.0 to 7.15.2. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.15.2/packages/@mantine/code-highlight) --- updated-dependencies: - dependency-name: "@mantine/code-highlight" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 68 +++++++++++++++++----------------- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index e0200088e6..ad169cd582 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -20,7 +20,7 @@ "@floating-ui/dom": "^1.6.12", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.15.0", + "@mantine/code-highlight": "^7.15.2", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 98c4c10984..fc9a461621 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -34,7 +34,7 @@ "@floating-ui/dom": "^1.6.12", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", - "@mantine/code-highlight": "^7.15.0", + 
"@mantine/code-highlight": "^7.15.2", "@mantine/core": "^7.11.2", "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", @@ -2136,36 +2136,34 @@ } }, "node_modules/@mantine/code-highlight": { - "version": "7.15.0", - "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.15.0.tgz", - "integrity": "sha512-UJ1Qfjs7LigFQt/yuvjjWv7y6AGhfAc177tmlegs1E2OTVL8f11pM4PKGuoQxRD70Wwz+hG23ZNMumibEcRejg==", - "license": "MIT", + "version": "7.15.2", + "resolved": "https://registry.npmjs.org/@mantine/code-highlight/-/code-highlight-7.15.2.tgz", + "integrity": "sha512-vp4RLYtLQJ++W4TUVt5K78RAiwlqSTAM3N1yFdDjDF2d5sK3k7CEztYP2ahwroIuaMYgJy/iOIW7l5iMLBbl7w==", "dependencies": { "clsx": "^2.1.1", "highlight.js": "^11.10.0" }, "peerDependencies": { - "@mantine/core": "7.15.0", - "@mantine/hooks": "7.15.0", + "@mantine/core": "7.15.2", + "@mantine/hooks": "7.15.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/core": { - "version": "7.15.0", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.15.0.tgz", - "integrity": "sha512-tZlRydfaEaaCNIi3tl1u+VtJxNyBoml2iCJAy6ZqcNNcjy/krJmta5lVtjUeApZfE33rkvr+3WVqGk0YjW6oSQ==", - "license": "MIT", + "version": "7.15.2", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.15.2.tgz", + "integrity": "sha512-640ns0L/HZAXYjz3+FRffr8UNcH1fU7ENUVxKLzqNA311Dcx0qS3byVKTY/IVJYln6AkjoEfIJMiixT9fCZBiQ==", "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", "react-number-format": "^5.4.2", "react-remove-scroll": "^2.6.0", - "react-textarea-autosize": "8.5.5", + "react-textarea-autosize": "8.5.6", "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "7.15.0", + "@mantine/hooks": "7.15.2", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } @@ -2187,10 +2185,9 @@ } }, "node_modules/@mantine/hooks": { - "version": "7.15.0", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.15.0.tgz", - "integrity": 
"sha512-AV4ItRbVIWVDzpOPyj3ICrfQq7HEdKhO7IE7xxkdbJ4oj73DAq2jFsMoNdj3dN9u2tOn1bhPBIaP+8gKd0oAcw==", - "license": "MIT", + "version": "7.15.2", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.15.2.tgz", + "integrity": "sha512-p8dsW0fdJxzYhULbm1noFYRHuBvJHleYviC0BlwbkVySC8AsvFI8AmC3sMssWV3dQ3yQ/SidYo9U+K/czpDpZw==", "peerDependencies": { "react": "^18.x || ^19.x" } @@ -8091,9 +8088,9 @@ } }, "node_modules/react-textarea-autosize": { - "version": "8.5.5", - "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.5.tgz", - "integrity": "sha512-CVA94zmfp8m4bSHtWwmANaBR8EPsKy2aZ7KwqhoS4Ftib87F9Kvi7XQhOixypPLMc6kVYgOXvKFuuzZDpHGRPg==", + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.6.tgz", + "integrity": "sha512-aT3ioKXMa8f6zHYGebhbdMD2L00tKeRX1zuVuDx9YQK/JLLRSaSxq3ugECEmUB9z2kvk6bFSIoRHLkkUv0RJiw==", "dependencies": { "@babel/runtime": "^7.20.13", "use-composed-ref": "^1.3.0", @@ -8103,7 +8100,7 @@ "node": ">=10" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "node_modules/react-transition-group": { @@ -9052,19 +9049,24 @@ } }, "node_modules/use-composed-ref": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", - "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.4.0.tgz", + "integrity": "sha512-djviaxuOOh7wkj0paeO1Q/4wMZ8Zrnag5H6yBvzN7AKKe8beOaED9SF5/ByLqsku8NP4zQqsvM2u3ew/tJK8/w==", "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } } }, "node_modules/use-isomorphic-layout-effect": { - "version": 
"1.1.2", - "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", - "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.2.0.tgz", + "integrity": "sha512-q6ayo8DWoPZT0VdG4u3D3uxcgONP3Mevx2i2b0434cwWBoL+aelL1DzkXI6w3PhTZzUeR2kaVlZn70iCiseP6w==", "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "peerDependenciesMeta": { "@types/react": { @@ -9073,14 +9075,14 @@ } }, "node_modules/use-latest": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.2.1.tgz", - "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.3.0.tgz", + "integrity": "sha512-mhg3xdm9NaM8q+gLT8KryJPnRFOz1/5XPBhmDEVZK1webPzDjrPk7f/mbpeLqTgB9msytYWANxgALOCJKnLvcQ==", "dependencies": { "use-isomorphic-layout-effect": "^1.1.1" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "peerDependenciesMeta": { "@types/react": { From 75d9c328d71be928f8aac0eefb4ae21b2aa4cacc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:58:17 +0100 Subject: [PATCH 1238/1397] chore(deps): bump react-router-dom from 7.0.2 to 7.1.1 in /web/ui (#15743) Bumps [react-router-dom](https://github.com/remix-run/react-router/tree/HEAD/packages/react-router-dom) from 7.0.2 to 7.1.1. 
- [Release notes](https://github.com/remix-run/react-router/releases) - [Changelog](https://github.com/remix-run/react-router/blob/main/packages/react-router-dom/CHANGELOG.md) - [Commits](https://github.com/remix-run/react-router/commits/react-router-dom@7.1.1/packages/react-router-dom) --- updated-dependencies: - dependency-name: react-router-dom dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 28 +++++++++++----------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index ad169cd582..ff62f74a64 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -44,7 +44,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.2.0", - "react-router-dom": "^7.0.2", + "react-router-dom": "^7.1.1", "sanitize-html": "^2.13.1", "uplot": "^1.6.31", "uplot-react": "^1.2.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index fc9a461621..e0f28f1e1d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -58,7 +58,7 @@ "react-dom": "^18.3.1", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.2.0", - "react-router-dom": "^7.0.2", + "react-router-dom": "^7.1.1", "sanitize-html": "^2.13.1", "uplot": "^1.6.31", "uplot-react": "^1.2.2", @@ -2839,8 +2839,7 @@ "node_modules/@types/cookie": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", - "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", - "license": "MIT" + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==" }, "node_modules/@types/estree": { "version": "1.0.6", @@ 
-4080,7 +4079,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz", "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==", - "license": "MIT", "engines": { "node": ">=18" } @@ -8026,10 +8024,9 @@ } }, "node_modules/react-router": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.0.2.tgz", - "integrity": "sha512-m5AcPfTRUcjwmhBzOJGEl6Y7+Crqyju0+TgTQxoS4SO+BkWbhOrcfZNq6wSWdl2BBbJbsAoBUb8ZacOFT+/JlA==", - "license": "MIT", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.1.1.tgz", + "integrity": "sha512-39sXJkftkKWRZ2oJtHhCxmoCrBCULr/HAH4IT5DHlgu/Q0FCPV0S4Lx+abjDTx/74xoZzNYDYbOZWlJjruyuDQ==", "dependencies": { "@types/cookie": "^0.6.0", "cookie": "^1.0.1", @@ -8050,12 +8047,11 @@ } }, "node_modules/react-router-dom": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.0.2.tgz", - "integrity": "sha512-VJOQ+CDWFDGaWdrG12Nl+d7yHtLaurNgAQZVgaIy7/Xd+DojgmYLosFfZdGz1wpxmjJIAkAMVTKWcvkx1oggAw==", - "license": "MIT", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.1.1.tgz", + "integrity": "sha512-vSrQHWlJ5DCfyrhgo0k6zViOe9ToK8uT5XGSmnuC2R3/g261IdIMpZVqfjD6vWSXdnf5Czs4VA/V60oVR6/jnA==", "dependencies": { - "react-router": "7.0.2" + "react-router": "7.1.1" }, "engines": { "node": ">=20.0.0" @@ -8395,8 +8391,7 @@ "node_modules/set-cookie-parser": { "version": "2.7.1", "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", - "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", - "license": "MIT" + "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==" }, "node_modules/shebang-command": { "version": "2.0.0", @@ -8911,8 +8906,7 @@ 
"node_modules/turbo-stream": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", - "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==", - "license": "ISC" + "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==" }, "node_modules/type-check": { "version": "0.4.0", From c5ef7153e0ce22de376c6db2c27a334227777716 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:58:55 +0100 Subject: [PATCH 1239/1397] chore(deps): bump @tanstack/react-query in /web/ui (#15737) Bumps [@tanstack/react-query](https://github.com/TanStack/query/tree/HEAD/packages/react-query) from 5.62.7 to 5.62.11. - [Release notes](https://github.com/TanStack/query/releases) - [Commits](https://github.com/TanStack/query/commits/v5.62.11/packages/react-query) --- updated-dependencies: - dependency-name: "@tanstack/react-query" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index ff62f74a64..f82c03262d 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -31,7 +31,7 @@ "@prometheus-io/codemirror-promql": "0.301.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.24.0", - "@tanstack/react-query": "^5.62.7", + "@tanstack/react-query": "^5.62.11", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index e0f28f1e1d..e9955be556 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -45,7 +45,7 @@ "@prometheus-io/codemirror-promql": "0.301.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.24.0", - "@tanstack/react-query": "^5.62.7", + "@tanstack/react-query": "^5.62.11", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", "@types/lodash": "^4.17.13", @@ -2674,22 +2674,20 @@ } }, "node_modules/@tanstack/query-core": { - "version": "5.62.7", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.62.7.tgz", - "integrity": "sha512-fgpfmwatsrUal6V+8EC2cxZIQVl9xvL7qYa03gsdsCy985UTUlS4N+/3hCzwR0PclYDqisca2AqR1BVgJGpUDA==", - "license": "MIT", + "version": "5.62.9", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.62.9.tgz", + "integrity": "sha512-lwePd8hNYhyQ4nM/iRQ+Wz2cDtspGeZZHFZmCzHJ7mfKXt+9S301fULiY2IR2byJYY6Z03T427E5PoVfMexHjw==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@tanstack/react-query": { - "version": "5.62.7", - "resolved": 
"https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.62.7.tgz", - "integrity": "sha512-+xCtP4UAFDTlRTYyEjLx0sRtWyr5GIk7TZjZwBu4YaNahi3Rt2oMyRqfpfVrtwsqY2sayP4iXVCwmC+ZqqFmuw==", - "license": "MIT", + "version": "5.62.11", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.62.11.tgz", + "integrity": "sha512-Xb1nw0cYMdtFmwkvH9+y5yYFhXvLRCnXoqlzSw7UkqtCVFq3cG8q+rHZ2Yz1XrC+/ysUaTqbLKJqk95mCgC1oQ==", "dependencies": { - "@tanstack/query-core": "5.62.7" + "@tanstack/query-core": "5.62.9" }, "funding": { "type": "github", From 16f3639521dde70e5189eb6394ba29daa0aec97d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:59:32 +0100 Subject: [PATCH 1240/1397] chore(deps-dev): bump @rollup/plugin-node-resolve in /web/ui (#15740) Bumps [@rollup/plugin-node-resolve](https://github.com/rollup/plugins/tree/HEAD/packages/node-resolve) from 15.3.0 to 16.0.0. - [Changelog](https://github.com/rollup/plugins/blob/master/packages/node-resolve/CHANGELOG.md) - [Commits](https://github.com/rollup/plugins/commits/commonjs-v16.0.0/packages/node-resolve) --- updated-dependencies: - dependency-name: "@rollup/plugin-node-resolve" dependency-type: direct:development update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 282ab5ab62..270e57797f 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -34,7 +34,7 @@ "@lezer/generator": "^1.7.2", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", - "@rollup/plugin-node-resolve": "^15.3.0" + "@rollup/plugin-node-resolve": "^16.0.0" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index e9955be556..e7d0b000fa 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -195,7 +195,7 @@ "@lezer/generator": "^1.7.2", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", - "@rollup/plugin-node-resolve": "^15.3.0" + "@rollup/plugin-node-resolve": "^16.0.0" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -2331,9 +2331,9 @@ } }, "node_modules/@rollup/plugin-node-resolve": { - "version": "15.3.0", - "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.3.0.tgz", - "integrity": "sha512-9eO5McEICxMzJpDW9OnMYSv4Sta3hmt7VtBFz5zR9273suNOydOyq/FrGeGy+KsTRFm8w0SLVhzig2ILFT63Ag==", + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.0.tgz", + "integrity": "sha512-0FPvAeVUT/zdWoO0jnb/V5BlBsUSNfkIOtFHzMO4H9MOklrmQFY6FduVHKucNb/aTFxvnGhj4MNj/T1oNdDfNg==", "dev": true, "dependencies": { "@rollup/pluginutils": "^5.0.1", From 287e56058b3bdf3deaa48367ef5117805565e0a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 20:23:36 +0100 Subject: [PATCH 1241/1397] chore(deps): bump google.golang.org/grpc 
from 1.69.0 to 1.69.4 (#15811) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.69.0 to 1.69.4. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.69.0...v1.69.4) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ee88a0e5f9..93cd89f469 100644 --- a/go.mod +++ b/go.mod @@ -81,7 +81,7 @@ require ( golang.org/x/tools v0.28.0 google.golang.org/api v0.216.0 google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 - google.golang.org/grpc v1.69.2 + google.golang.org/grpc v1.69.4 google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/go.sum b/go.sum index 7ab4986307..515fb9c053 100644 --- a/go.sum +++ b/go.sum @@ -648,8 +648,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From fccd78d3cd40e5d05f69595109514276742cdcb8 Mon Sep 17 00:00:00 2001 From: leonnicolas Date: Mon, 13 Jan 2025 21:46:45 +0100 Subject: [PATCH 1242/1397] Update storage keys Signed-off-by: leonnicolas --- web/ui/mantine-ui/src/pages/AlertsPage.tsx | 2 +- web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/AlertsPage.tsx b/web/ui/mantine-ui/src/pages/AlertsPage.tsx index e107adfb23..4ce42d9059 100644 --- a/web/ui/mantine-ui/src/pages/AlertsPage.tsx +++ b/web/ui/mantine-ui/src/pages/AlertsPage.tsx @@ -163,7 +163,7 @@ export default function AlertsPage() { ); const [debouncedSearch] = useDebouncedValue(searchFilter.trim(), 250); const [showEmptyGroups, setShowEmptyGroups] = useLocalStorage({ - key: "alerts-page-show-empty-groups", + key: "alertsPage.showEmptyGroups", defaultValue: true, }); diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index f674a1f4d3..1e29c568e1 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -166,7 +166,7 @@ const ScrapePoolList: FC = ({ const dispatch = useAppDispatch(); const [showEmptyPools, setShowEmptyPools] = useLocalStorage({ - key: "targets-page-show-empty-pools", + key: "targetsPage.showEmptyPools", defaultValue: true, }); From e5415f8ecd62dc843e48147494392371185d004c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:54:09 +0100 Subject: [PATCH 1243/1397] chore(deps): bump sanitize-html from 2.13.1 to 2.14.0 in /web/ui (#15739) Bumps [sanitize-html](https://github.com/apostrophecms/sanitize-html) from 2.13.1 to 2.14.0. 
- [Changelog](https://github.com/apostrophecms/sanitize-html/blob/main/CHANGELOG.md) - [Commits](https://github.com/apostrophecms/sanitize-html/compare/2.13.1...2.14.0) --- updated-dependencies: - dependency-name: sanitize-html dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index f82c03262d..0ee8802e1b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -45,7 +45,7 @@ "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.2.0", "react-router-dom": "^7.1.1", - "sanitize-html": "^2.13.1", + "sanitize-html": "^2.14.0", "uplot": "^1.6.31", "uplot-react": "^1.2.2", "use-query-params": "^2.2.1" diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index e7d0b000fa..f8f1ba90e6 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -59,7 +59,7 @@ "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.2.0", "react-router-dom": "^7.1.1", - "sanitize-html": "^2.13.1", + "sanitize-html": "^2.14.0", "uplot": "^1.6.31", "uplot-react": "^1.2.2", "use-query-params": "^2.2.1" @@ -8333,9 +8333,9 @@ "license": "MIT" }, "node_modules/sanitize-html": { - "version": "2.13.1", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.13.1.tgz", - "integrity": "sha512-ZXtKq89oue4RP7abL9wp/9URJcqQNABB5GGJ2acW1sdO8JTVl92f4ygD7Yc9Ze09VAZhnt2zegeU0tbNsdcLYg==", + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.14.0.tgz", + "integrity": "sha512-CafX+IUPxZshXqqRaG9ZClSlfPVjSxI0td7n07hk8QO2oO+9JDnlcL8iM8TWeOXOIBFgIOx6zioTzM53AOMn3g==", "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", 
From 17ca5c8fa5fbe01cacb6191d418063cad74f1ea8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:54:39 +0100 Subject: [PATCH 1244/1397] chore(deps): bump @codemirror/autocomplete in /web/ui (#15750) Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.18.3 to 6.18.4. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.18.3...6.18.4) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 17 +++++------------ 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0ee8802e1b..bd741503d9 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -12,7 +12,7 @@ "test": "vitest" }, "dependencies": { - "@codemirror/autocomplete": "^6.18.3", + "@codemirror/autocomplete": "^6.18.4", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index d4e95c7ee8..aea4d22c62 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,7 +33,7 @@ "lru-cache": "^11.0.2" }, "devDependencies": { - "@codemirror/autocomplete": "^6.18.3", + "@codemirror/autocomplete": "^6.18.4", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f8f1ba90e6..74d898066e 
100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -26,7 +26,7 @@ "name": "@prometheus-io/mantine-ui", "version": "0.301.0", "dependencies": { - "@codemirror/autocomplete": "^6.18.3", + "@codemirror/autocomplete": "^6.18.4", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", @@ -163,7 +163,7 @@ "lru-cache": "^11.0.2" }, "devDependencies": { - "@codemirror/autocomplete": "^6.18.3", + "@codemirror/autocomplete": "^6.18.4", "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", @@ -788,21 +788,14 @@ "peer": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.18.3", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.3.tgz", - "integrity": "sha512-1dNIOmiM0z4BIBwxmxEfA1yoxh1MF/6KPBbh20a5vphGV0ictKlgQsbJs6D6SkR6iJpGbpwRsa6PFMNlg9T9pQ==", - "license": "MIT", + "version": "6.18.4", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.4.tgz", + "integrity": "sha512-sFAphGQIqyQZfP2ZBsSHV7xQvo9Py0rV0dW7W3IMRdS+zDuNb2l3no78CvUaWKGfzFjI4FTrLdUSj86IGb2hRA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.17.0", "@lezer/common": "^1.0.0" - }, - "peerDependencies": { - "@codemirror/language": "^6.0.0", - "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0" } }, "node_modules/@codemirror/commands": { From bde2d11ea42f820683b2c0498402e5bcae2544f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:55:14 +0100 Subject: [PATCH 1245/1397] chore(deps-dev): bump eslint from 9.16.0 to 9.18.0 in /web/ui (#15814) Bumps [eslint](https://github.com/eslint/eslint) from 9.16.0 to 9.18.0. 
- [Release notes](https://github.com/eslint/eslint/releases) - [Changelog](https://github.com/eslint/eslint/blob/main/CHANGELOG.md) - [Commits](https://github.com/eslint/eslint/compare/v9.16.0...v9.18.0) --- updated-dependencies: - dependency-name: eslint dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 81 +++++++++------------------------- 2 files changed, 21 insertions(+), 62 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index bd741503d9..7ffe65c4c1 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -59,7 +59,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.3.4", - "eslint": "^9.16.0", + "eslint": "^9.18.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.16", "globals": "^15.14.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 74d898066e..febeaa4dcd 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -73,7 +73,7 @@ "@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/parser": "^6.21.0", "@vitejs/plugin-react": "^4.3.4", - "eslint": "^9.16.0", + "eslint": "^9.18.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.16", "globals": "^15.14.0", @@ -86,19 +86,18 @@ } }, "mantine-ui/node_modules/eslint": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.16.0.tgz", - "integrity": "sha512-whp8mSQI4C8VXd+fLgSM0lh3UlmcFtVwUQjyKCFfsp+2ItAIYhlq/hqGahGqHE6cv9unM41VlqKk2VtKYR2TaA==", + "version": "9.18.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.18.0.tgz", + "integrity": 
"sha512-+waTfRWQlSbpt3KWE+CjrPPYnbq9kfZIYUqapc0uBXyjTp8aYXZDsUH16m39Ryq3NjAVP4tjuF7KaukeqoCoaA==", "dev": true, - "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.19.0", - "@eslint/core": "^0.9.0", + "@eslint/core": "^0.10.0", "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "9.16.0", - "@eslint/plugin-kit": "^0.2.3", + "@eslint/js": "9.18.0", + "@eslint/plugin-kit": "^0.2.5", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.1", @@ -106,7 +105,7 @@ "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", - "cross-spawn": "^7.0.5", + "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", "eslint-scope": "^8.2.0", @@ -145,15 +144,6 @@ } } }, - "mantine-ui/node_modules/eslint/node_modules/@eslint/js": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", - "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", - "dev": true, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", "version": "0.301.0", @@ -1338,7 +1328,6 @@ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.1.tgz", "integrity": "sha512-fo6Mtm5mWyKjA/Chy1BYTdn5mGJoDNjC7C64ug20ADsRDGrA85bN3uK3MaKbeRkRuuIEAR5N33Jr1pbm411/PA==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@eslint/object-schema": "^2.1.5", "debug": "^4.3.1", @@ -1349,11 +1338,10 @@ } }, "node_modules/@eslint/core": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.9.1.tgz", - "integrity": "sha512-GuUdqkyyzQI5RMIWkHhvTWLCyLo1jNK3vzkSyaExH5kHPDHcuL2VOpHjmMY+y3+NC69qAKToBqldTBgYeLSr9Q==", + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.10.0.tgz", + "integrity": 
"sha512-gFHJ+xBOo4G3WRlR1e/3G8A6/KZAH6zcE/hkLRCZTi/B9avAG365QhFA8uOGzTMqgTghpn7/fSnscW++dpMSAw==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@types/json-schema": "^7.0.15" }, @@ -1398,9 +1386,9 @@ } }, "node_modules/@eslint/js": { - "version": "9.17.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.17.0.tgz", - "integrity": "sha512-Sxc4hqcs1kTu0iID3kcZDW3JHq2a77HO9P8CP6YEA/FpH3Ll8UXE2r/86Rz9YJLKme39S9vU5OWNjC6Xl0Cr3w==", + "version": "9.18.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.18.0.tgz", + "integrity": "sha512-fK6L7rxcq6/z+AaQMtiFTkvbHkBLNlwyRxHpKawP0x3u9+NC6MQTnFW+AdpwC6gfHTW0051cokQgtTN2FqlxQA==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -1411,18 +1399,17 @@ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.5.tgz", "integrity": "sha512-o0bhxnL89h5Bae5T318nFoFzGy+YE5i/gGkoPAgkmTVdRKTiv3p8JHevPiPaMwoloKfEiiaHlawCqaZMqRm+XQ==", "dev": true, - "license": "Apache-2.0", "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/plugin-kit": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.4.tgz", - "integrity": "sha512-zSkKow6H5Kdm0ZUQUB2kV5JIXqoG0+uH5YADhaEHswm664N9Db8dXSi0nMJpacpMf+MyyglF1vnZohpEg5yUtg==", + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.5.tgz", + "integrity": "sha512-lB05FkqEdUg2AA0xEbUz0SnkXT1LcCTa438W4IWTUh4hdOnVbQyOJ81OrDXsJk/LSiJHubgGEFoR5EHq1NsH1A==", "dev": true, - "license": "Apache-2.0", "dependencies": { + "@eslint/core": "^0.10.0", "levn": "^0.4.1" }, "engines": { @@ -1484,7 +1471,6 @@ "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18.0" } @@ -1494,7 +1480,6 @@ "resolved": 
"https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz", "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==", "dev": true, - "license": "Apache-2.0", "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.3.0" @@ -1508,7 +1493,6 @@ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz", "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18" }, @@ -1523,7 +1507,6 @@ "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", "deprecated": "Use @eslint/config-array instead", "dev": true, - "license": "Apache-2.0", "peer": true, "dependencies": { "@humanwhocodes/object-schema": "^2.0.3", @@ -1554,7 +1537,6 @@ "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", "deprecated": "Use @eslint/object-schema instead", "dev": true, - "license": "BSD-3-Clause", "peer": true }, "node_modules/@humanwhocodes/retry": { @@ -1562,7 +1544,6 @@ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.1.tgz", "integrity": "sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA==", "dev": true, - "license": "Apache-2.0", "engines": { "node": ">=18.18" }, @@ -3312,7 +3293,6 @@ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.1.tgz", "integrity": "sha512-fEzPV3hSkSMltkw152tJKNARhOupqbH96MZWyRjNaYZOMIzbrTeQDG+MTc6Mr2pgzFQzFxAfmhGDNP5QK++2ZA==", "dev": true, - "license": "ISC", "peer": true }, "node_modules/@vitejs/plugin-react": { @@ -4305,7 +4285,6 @@ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", "dev": true, - "license": 
"Apache-2.0", "peer": true, "dependencies": { "esutils": "^2.0.2" @@ -4528,7 +4507,6 @@ "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", @@ -4651,7 +4629,6 @@ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz", "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" @@ -4681,7 +4658,6 @@ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "ajv": "^6.12.4", @@ -4706,7 +4682,6 @@ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4717,7 +4692,6 @@ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "dependencies": { "esrecurse": "^4.3.0", @@ -4735,7 +4709,6 @@ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, - "license": "Apache-2.0", "peer": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" 
@@ -4749,7 +4722,6 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, - "license": "BSD-2-Clause", "peer": true, "dependencies": { "acorn": "^8.9.0", @@ -4768,7 +4740,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "flat-cache": "^3.0.4" @@ -4782,7 +4753,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "flatted": "^3.2.9", @@ -4798,7 +4768,6 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, - "license": "MIT", "peer": true, "dependencies": { "type-fest": "^0.20.2" @@ -4815,7 +4784,6 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, - "license": "(MIT OR CC0-1.0)", "peer": true, "engines": { "node": ">=10" @@ -4875,7 +4843,6 @@ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { "estraverse": "^5.2.0" }, @@ -5059,7 +5026,6 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", "integrity": 
"sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, - "license": "MIT", "dependencies": { "flat-cache": "^4.0.0" }, @@ -5135,7 +5101,6 @@ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", "dev": true, - "license": "MIT", "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" @@ -5148,8 +5113,7 @@ "version": "3.3.2", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.2.tgz", "integrity": "sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==", - "dev": true, - "license": "ISC" + "dev": true }, "node_modules/form-data": { "version": "4.0.0", @@ -5656,7 +5620,6 @@ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", "dev": true, - "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -6819,8 +6782,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT" + "dev": true }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", @@ -6869,7 +6831,6 @@ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "dev": true, - "license": "MIT", "dependencies": { "json-buffer": "3.0.1" } @@ -8238,7 +8199,6 @@ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, - "license": "ISC", "peer": true, "dependencies": { "glob": "^7.1.3" 
@@ -8706,7 +8666,6 @@ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true, - "license": "MIT", "peer": true }, "node_modules/throttle-debounce": { From 4c2a31642442661bb6fe78f229ec4d6afb5c3ac5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:55:46 +0100 Subject: [PATCH 1246/1397] chore(deps): bump sanitize-html in /web/ui/react-app (#15758) Bumps [sanitize-html](https://github.com/apostrophecms/sanitize-html) from 2.13.1 to 2.14.0. - [Changelog](https://github.com/apostrophecms/sanitize-html/blob/main/CHANGELOG.md) - [Commits](https://github.com/apostrophecms/sanitize-html/compare/2.13.1...2.14.0) --- updated-dependencies: - dependency-name: sanitize-html dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 9 ++++----- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index a74effa39c..8a56e41e22 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -42,7 +42,7 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.13.1", + "sanitize-html": "^2.14.0", "sass": "1.83.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" @@ -21180,10 +21180,9 @@ "dev": true }, "node_modules/sanitize-html": { - "version": "2.13.1", - "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.13.1.tgz", - "integrity": "sha512-ZXtKq89oue4RP7abL9wp/9URJcqQNABB5GGJ2acW1sdO8JTVl92f4ygD7Yc9Ze09VAZhnt2zegeU0tbNsdcLYg==", - "license": "MIT", + 
"version": "2.14.0", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.14.0.tgz", + "integrity": "sha512-CafX+IUPxZshXqqRaG9ZClSlfPVjSxI0td7n07hk8QO2oO+9JDnlcL8iM8TWeOXOIBFgIOx6zioTzM53AOMn3g==", "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 90fb4fb502..3bdb72251c 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -37,7 +37,7 @@ "react-router-dom": "^5.3.4", "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", - "sanitize-html": "^2.13.1", + "sanitize-html": "^2.14.0", "sass": "1.83.0", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" From 16b5395bd77801d12bac9e39cf29ed8cc6b3841a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:56:24 +0100 Subject: [PATCH 1247/1397] chore(deps): bump @codemirror/autocomplete in /web/ui/react-app (#15760) Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.18.3 to 6.18.4. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.18.3...6.18.4) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 15 ++++----------- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 8a56e41e22..ff74defe6e 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -8,7 +8,7 @@ "name": "@prometheus-io/app", "version": "0.300.1", "dependencies": { - "@codemirror/autocomplete": "^6.18.3", + "@codemirror/autocomplete": "^6.18.4", "@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", @@ -2237,21 +2237,14 @@ "dev": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.18.3", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.3.tgz", - "integrity": "sha512-1dNIOmiM0z4BIBwxmxEfA1yoxh1MF/6KPBbh20a5vphGV0ictKlgQsbJs6D6SkR6iJpGbpwRsa6PFMNlg9T9pQ==", - "license": "MIT", + "version": "6.18.4", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.4.tgz", + "integrity": "sha512-sFAphGQIqyQZfP2ZBsSHV7xQvo9Py0rV0dW7W3IMRdS+zDuNb2l3no78CvUaWKGfzFjI4FTrLdUSj86IGb2hRA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.17.0", "@lezer/common": "^1.0.0" - }, - "peerDependencies": { - "@codemirror/language": "^6.0.0", - "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0" } }, "node_modules/@codemirror/commands": { diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 3bdb72251c..d126e85a79 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -3,7 +3,7 @@ "version": "0.300.1", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.18.3", + "@codemirror/autocomplete": "^6.18.4", 
"@codemirror/commands": "^6.7.1", "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", From 766d44d47b3a49882216c09e08f6d98af37ea377 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:56:56 +0100 Subject: [PATCH 1248/1397] chore(deps): bump @mantine/notifications in /web/ui (#15818) Bumps [@mantine/notifications](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/notifications) from 7.15.0 to 7.15.3. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.15.3/packages/@mantine/notifications) --- updated-dependencies: - dependency-name: "@mantine/notifications" dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 115 +++++++++++++++------------------ 2 files changed, 53 insertions(+), 64 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 7ffe65c4c1..0c0d99027b 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -24,7 +24,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.15.0", + "@mantine/notifications": "^7.15.3", "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index febeaa4dcd..4592a18b3c 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -38,7 +38,7 @@ "@mantine/core": "^7.11.2", "@mantine/dates": "^7.15.0", "@mantine/hooks": "^7.11.2", - "@mantine/notifications": "^7.15.0", + "@mantine/notifications": "^7.15.3", 
"@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", @@ -2125,19 +2125,19 @@ } }, "node_modules/@mantine/core": { - "version": "7.15.2", - "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.15.2.tgz", - "integrity": "sha512-640ns0L/HZAXYjz3+FRffr8UNcH1fU7ENUVxKLzqNA311Dcx0qS3byVKTY/IVJYln6AkjoEfIJMiixT9fCZBiQ==", + "version": "7.15.3", + "resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.15.3.tgz", + "integrity": "sha512-8IMTq5xDJDjByDUYkDNKImikASStzrnPtVumKsrEnyEY0zhAWkAe/z/+PjTUMcN44ncJ/PrXQkJ6qMaVWzSZwA==", "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", - "react-number-format": "^5.4.2", - "react-remove-scroll": "^2.6.0", + "react-number-format": "^5.4.3", + "react-remove-scroll": "^2.6.2", "react-textarea-autosize": "8.5.6", "type-fest": "^4.27.0" }, "peerDependencies": { - "@mantine/hooks": "7.15.2", + "@mantine/hooks": "7.15.3", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } @@ -2159,34 +2159,32 @@ } }, "node_modules/@mantine/hooks": { - "version": "7.15.2", - "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.15.2.tgz", - "integrity": "sha512-p8dsW0fdJxzYhULbm1noFYRHuBvJHleYviC0BlwbkVySC8AsvFI8AmC3sMssWV3dQ3yQ/SidYo9U+K/czpDpZw==", + "version": "7.15.3", + "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.15.3.tgz", + "integrity": "sha512-rZYObhrmww3OIb4O30pDox/rc+9k3AExO0FSw13t7cfz5/Di+Ho1cChswVFAshnp81ucGEod1fiDOfuyGW7JhA==", "peerDependencies": { "react": "^18.x || ^19.x" } }, "node_modules/@mantine/notifications": { - "version": "7.15.0", - "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.15.0.tgz", - "integrity": "sha512-F1g2mFRUTk++ATsbsi8T2WaTRhejB05FusvG3iHC4/a1+0K5Vjh2Armt3VNY0vsUR3V5RdAnP8uZDlMhM7YTQQ==", - "license": "MIT", + "version": "7.15.3", + "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.15.3.tgz", + "integrity": 
"sha512-C1obM5dQsSHIB3B3Kajk0TdLnBpLXFMOIy0otG5khoL/8c8qOU4U0kHxtPVFBFvU/hw4rx7/idiiJdjp8DepDQ==", "dependencies": { - "@mantine/store": "7.15.0", + "@mantine/store": "7.15.3", "react-transition-group": "4.4.5" }, "peerDependencies": { - "@mantine/core": "7.15.0", - "@mantine/hooks": "7.15.0", + "@mantine/core": "7.15.3", + "@mantine/hooks": "7.15.3", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "node_modules/@mantine/store": { - "version": "7.15.0", - "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.15.0.tgz", - "integrity": "sha512-XzQcVFTkD0XspPNsB2NivzbAeZUrLFGO5j8hvKcmGGvUWYlR99GbL7q13ujwJQnNpElqAPSeuN161tnbCqB+Ng==", - "license": "MIT", + "version": "7.15.3", + "resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.15.3.tgz", + "integrity": "sha512-E3pCEm5ozRF/iK/jM1liKntjqaKhotvPtNAqSBcx6AkWSJ8bt16JhNrmrs3J3RmWvfqzF+fftT8HI/3HYbgu9w==", "peerDependencies": { "react": "^18.x || ^19.x" } @@ -5521,14 +5519,6 @@ "license": "ISC", "peer": true }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", @@ -7889,12 +7879,12 @@ "peer": true }, "node_modules/react-number-format": { - "version": "5.4.2", - "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.2.tgz", - "integrity": "sha512-cg//jVdS49PYDgmcYoBnMMHl4XNTMuV723ZnHD2aXYtWWWqbVF3hjQ8iB+UZEuXapLbeA8P8H+1o6ZB1lcw3vg==", + "version": "5.4.3", + "resolved": "https://registry.npmjs.org/react-number-format/-/react-number-format-5.4.3.tgz", + "integrity": "sha512-VCY5hFg/soBighAoGcdE+GagkJq0230qN6jcS5sp8wQX1qy1fYN/RX7/BXkrs0oyzzwqR8/+eSUrqXbGeywdUQ==", "peerDependencies": { - 
"react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0" + "react": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^0.14 || ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "node_modules/react-redux": { @@ -7931,22 +7921,22 @@ } }, "node_modules/react-remove-scroll": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.0.tgz", - "integrity": "sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==", + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.2.tgz", + "integrity": "sha512-KmONPx5fnlXYJQqC62Q+lwIeAk64ws/cUw6omIumRzMRPqgnYqhSSti99nbj0Ry13bv7dF+BKn7NB+OqkdZGTw==", "dependencies": { - "react-remove-scroll-bar": "^2.3.6", + "react-remove-scroll-bar": "^2.3.7", "react-style-singleton": "^2.2.1", "tslib": "^2.1.0", - "use-callback-ref": "^1.3.0", + "use-callback-ref": "^1.3.3", "use-sidecar": "^1.1.2" }, "engines": { "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -7955,19 +7945,19 @@ } }, "node_modules/react-remove-scroll-bar": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz", - "integrity": "sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==", + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", "dependencies": { - "react-style-singleton": "^2.2.1", + 
"react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "engines": { "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "peerDependenciesMeta": { "@types/react": { @@ -8014,20 +8004,19 @@ } }, "node_modules/react-style-singleton": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", - "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", "dependencies": { "get-nonce": "^1.0.0", - "invariant": "^2.2.4", "tslib": "^2.0.0" }, "engines": { "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -8973,9 +8962,9 @@ } }, "node_modules/use-callback-ref": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", - "integrity": "sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==", + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", "dependencies": { "tslib": "^2.0.0" }, @@ -8983,8 +8972,8 @@ "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || 
^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -9058,9 +9047,9 @@ } }, "node_modules/use-sidecar": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", - "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" @@ -9069,8 +9058,8 @@ "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { From 9142f10fbbb37756c1c084d578964081f82feac4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:57:28 +0100 Subject: [PATCH 1249/1397] chore(deps): bump @mantine/dates from 7.15.0 to 7.15.3 in /web/ui (#15819) Bumps [@mantine/dates](https://github.com/mantinedev/mantine/tree/HEAD/packages/@mantine/dates) from 7.15.0 to 7.15.3. - [Release notes](https://github.com/mantinedev/mantine/releases) - [Changelog](https://github.com/mantinedev/mantine/blob/master/CHANGELOG.md) - [Commits](https://github.com/mantinedev/mantine/commits/7.15.3/packages/@mantine/dates) --- updated-dependencies: - dependency-name: "@mantine/dates" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 0c0d99027b..b89ab83546 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -22,7 +22,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.15.2", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.15.0", + "@mantine/dates": "^7.15.3", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.15.3", "@microsoft/fetch-event-source": "^2.0.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 4592a18b3c..ad4996bc9a 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -36,7 +36,7 @@ "@lezer/highlight": "^1.2.1", "@mantine/code-highlight": "^7.15.2", "@mantine/core": "^7.11.2", - "@mantine/dates": "^7.15.0", + "@mantine/dates": "^7.15.3", "@mantine/hooks": "^7.11.2", "@mantine/notifications": "^7.15.3", "@microsoft/fetch-event-source": "^2.0.1", @@ -2143,16 +2143,15 @@ } }, "node_modules/@mantine/dates": { - "version": "7.15.0", - "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.15.0.tgz", - "integrity": "sha512-EM1Tp29DH6i6XH+fPkpNsjUQA32u+4yqOggFtytvuHgDITOj82xh2NIZGSyGH/rzkcy/Bh/t65pK3SoZJGTY6Q==", - "license": "MIT", + "version": "7.15.3", + "resolved": "https://registry.npmjs.org/@mantine/dates/-/dates-7.15.3.tgz", + "integrity": "sha512-lv71dcfA8qB43v03cRELC2/G7FQXfAgj0tSMImj2p2FL3PSWWF4WRvW6byiB+hszk4lgooSo7kppzkSMVUlsdA==", "dependencies": { "clsx": "^2.1.1" }, "peerDependencies": { - "@mantine/core": "7.15.0", - "@mantine/hooks": "7.15.0", + "@mantine/core": "7.15.3", + "@mantine/hooks": "7.15.3", "dayjs": ">=1.0.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" From f34f717b3d714a536fd32a4d25ac310072cf2e1c Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:57:52 +0100 Subject: [PATCH 1250/1397] chore(deps): bump @tabler/icons-react from 3.24.0 to 3.28.1 in /web/ui (#15820) Bumps [@tabler/icons-react](https://github.com/tabler/tabler-icons/tree/HEAD/packages/icons-react) from 3.24.0 to 3.28.1. - [Release notes](https://github.com/tabler/tabler-icons/releases) - [Commits](https://github.com/tabler/tabler-icons/commits/v3.28.1/packages/icons-react) --- updated-dependencies: - dependency-name: "@tabler/icons-react" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/package-lock.json | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index b89ab83546..252df975f1 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -30,7 +30,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.301.0", "@reduxjs/toolkit": "^2.5.0", - "@tabler/icons-react": "^3.24.0", + "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index ad4996bc9a..4675460743 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -44,7 +44,7 @@ "@nexucis/kvsearch": "^0.9.1", "@prometheus-io/codemirror-promql": "0.301.0", "@reduxjs/toolkit": "^2.5.0", - "@tabler/icons-react": "^3.24.0", + "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", "@testing-library/jest-dom": "^6.6.3", "@testing-library/react": "^16.1.0", @@ -2619,22 +2619,20 @@ } }, "node_modules/@tabler/icons": { - "version": "3.24.0", - "resolved": 
"https://registry.npmjs.org/@tabler/icons/-/icons-3.24.0.tgz", - "integrity": "sha512-qNis9e90QcdxAGV3wNIeX0Ba2R7ktm0cnqOToKHJfC2kj3fvJwEVLsw63K0/fm7NW8rSZjDSTQRmMnSg8g/wrg==", - "license": "MIT", + "version": "3.28.1", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.28.1.tgz", + "integrity": "sha512-h7nqKEvFooLtFxhMOC1/2eiV+KRXhBUuDUUJrJlt6Ft6tuMw2eU/9GLQgrTk41DNmIEzp/LI83K9J9UUU8YBYQ==", "funding": { "type": "github", "url": "https://github.com/sponsors/codecalm" } }, "node_modules/@tabler/icons-react": { - "version": "3.24.0", - "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.24.0.tgz", - "integrity": "sha512-m9c7TmlcDmKqvZAasG5rv1YvazZDrVEhNdNFa2d1Bzotc0dh+iceFdiZCEcYPDb5UcRyLAMvOaOC9y/5sfMMWw==", - "license": "MIT", + "version": "3.28.1", + "resolved": "https://registry.npmjs.org/@tabler/icons-react/-/icons-react-3.28.1.tgz", + "integrity": "sha512-KNBpA2kbxr3/2YK5swt7b/kd/xpDP1FHYZCxDFIw54tX8slELRFEf95VMxsccQHZeIcUbdoojmUUuYSbt/sM5Q==", "dependencies": { - "@tabler/icons": "3.24.0" + "@tabler/icons": "3.28.1" }, "funding": { "type": "github", From 93ca2e6113ab0b47ff782bc04ace63fed7303107 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 23:13:12 +0100 Subject: [PATCH 1251/1397] chore(deps): bump @codemirror/view from 6.35.3 to 6.36.2 in /web/ui (#15816) Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.35.3 to 6.36.2. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.35.3...6.36.2) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 11 +++++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 252df975f1..e14b945fd9 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -16,7 +16,7 @@ "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", - "@codemirror/view": "^6.34.1", + "@codemirror/view": "^6.36.2", "@floating-ui/dom": "^1.6.12", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index aea4d22c62..3f2c7f314d 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -37,7 +37,7 @@ "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.34.1", + "@codemirror/view": "^6.36.2", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 4675460743..06513f8ee3 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -30,7 +30,7 @@ "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", - "@codemirror/view": "^6.34.1", + "@codemirror/view": "^6.36.2", "@floating-ui/dom": "^1.6.12", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", @@ -157,7 +157,7 @@ "@codemirror/language": "^6.10.6", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.34.1", + "@codemirror/view": "^6.36.2", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.2", @@ -857,10 +857,9 @@ } }, 
"node_modules/@codemirror/view": { - "version": "6.35.3", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.35.3.tgz", - "integrity": "sha512-ScY7L8+EGdPl4QtoBiOzE4FELp7JmNUsBvgBcCakXWM2uiv/K89VAzU3BMDscf0DsACLvTKePbd5+cFDTcei6g==", - "license": "MIT", + "version": "6.36.2", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.36.2.tgz", + "integrity": "sha512-DZ6ONbs8qdJK0fdN7AB82CgI6tYXf4HWk1wSVa0+9bhVznCuuvhQtX8bFBoy3dv8rZSQqUd8GvhVAcielcidrA==", "dependencies": { "@codemirror/state": "^6.5.0", "style-mod": "^4.1.0", From 1e81fd216631913104b37fc6a43312cb3af0eec4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 23:14:05 +0100 Subject: [PATCH 1252/1397] chore(deps): bump @codemirror/language from 6.10.6 to 6.10.8 in /web/ui (#15742) Bumps [@codemirror/language](https://github.com/codemirror/language) from 6.10.6 to 6.10.8. - [Changelog](https://github.com/codemirror/language/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/language/compare/6.10.6...6.10.8) --- updated-dependencies: - dependency-name: "@codemirror/language" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/mantine-ui/package.json | 2 +- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index e14b945fd9..9cdb67aea5 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -13,7 +13,7 @@ }, "dependencies": { "@codemirror/autocomplete": "^6.18.4", - "@codemirror/language": "^6.10.6", + "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.36.2", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 3f2c7f314d..270fa16b29 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -34,7 +34,7 @@ }, "devDependencies": { "@codemirror/autocomplete": "^6.18.4", - "@codemirror/language": "^6.10.6", + "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.36.2", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 06513f8ee3..f9beb96da0 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -27,7 +27,7 @@ "version": "0.301.0", "dependencies": { "@codemirror/autocomplete": "^6.18.4", - "@codemirror/language": "^6.10.6", + "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.4.1", "@codemirror/view": "^6.36.2", @@ -154,7 +154,7 @@ }, "devDependencies": { "@codemirror/autocomplete": "^6.18.4", - "@codemirror/language": "^6.10.6", + "@codemirror/language": "^6.10.8", "@codemirror/lint": "^6.8.4", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.36.2", @@ -801,9 +801,9 @@ } }, "node_modules/@codemirror/language": { - "version": "6.10.6", - "resolved": 
"https://registry.npmjs.org/@codemirror/language/-/language-6.10.6.tgz", - "integrity": "sha512-KrsbdCnxEztLVbB5PycWXFxas4EOyk/fPAfruSOnDDppevQgid2XZ+KbJ9u+fDikP/e7MW7HPBTvTb8JlZK9vA==", + "version": "6.10.8", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.8.tgz", + "integrity": "sha512-wcP8XPPhDH2vTqf181U8MbZnW+tDyPYy0UzVOa+oHORjyT+mhhom9vBd7dApJwoDz9Nb/a8kHjJIsuA/t8vNFw==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.23.0", From e1324112aa85facaca5f9ec9b5b7b05103c6804b Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 13 Jan 2025 15:37:03 -0800 Subject: [PATCH 1253/1397] less context cancellation check for series API Signed-off-by: Ben Ye --- web/api/v1/api.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index ea7d5c5fe4..fd53288717 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -68,6 +68,9 @@ const ( // Non-standard status code (originally introduced by nginx) for the case when a client closes // the connection while the server is still processing the request. statusClientClosedConnection = 499 + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
+ checkContextEveryNIterations = 128 ) type errorType string @@ -962,10 +965,15 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { warnings := set.Warnings() + i := 1 for set.Next() { - if err := ctx.Err(); err != nil { - return apiFuncResult{nil, returnAPIError(err), warnings, closer} + if i%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return apiFuncResult{nil, returnAPIError(err), warnings, closer} + } } + i++ + metrics = append(metrics, set.At().Labels()) if limit > 0 && len(metrics) > limit { From 5303e515afe40966ebac5451a5a27275a6cdfa1a Mon Sep 17 00:00:00 2001 From: sh0rez Date: Tue, 14 Jan 2025 15:33:31 +0100 Subject: [PATCH 1254/1397] remote/otlp: convert delta to cumulative (#15165) What Adds support for OTLP delta temporality to the OTLP endpoint. This is done by calling the deltatocumulative processor from the OpenTelemetry collector during OTLP conversion. Why Delta conversion is a naturally stateful process, which requires careful request routing when operated inside a collector. Prometheus is already stateful and doing the conversion in-server reduces the operational burden on the ingest architecture by only having one stateful component. How deltatocumulative is a OTel collector component that works as follows: * pmetric.Metrics come from a receiver or in this case from the HTTP client * It operates as an in-place update loop: * for each sample, if not delta, leave unmodified * if delta, do: * state += sample, where state is the in-memory sum of all previous samples * sample = state, sample value is now cumulative * this is supported for sums (counters), gauges, histograms (old histograms) and exponential histograms (native histograms) If a series receives no new samples for 5m, its state is removed from memory Performance Delta performance is a stateful operation and the OTel code is not highly optimized yet, e.g. it locks the entire processor for each request. 
Nonetheless, care has been taken to mitigate those effects: delta conversion is behind a feature flag. If disabled, no conversion code is ever invoked if enabled, conversion is not invoked if request not actually contains delta samples. This leads to no measureable performance difference between default-cumulative to convert-cumulative (only cumulative, feature on/off) Signed-off-by: sh0rez --- cmd/prometheus/main.go | 5 +- docs/command-line/prometheus.md | 2 +- docs/feature_flags.md | 22 ++ docs/querying/api.md | 9 + go.mod | 11 +- go.sum | 54 ++++- storage/remote/write_handler.go | 133 ++++++++--- storage/remote/write_test.go | 377 +++++++++++++++++++++++++++++++- web/api/v1/api.go | 4 +- web/api/v1/errors_test.go | 1 + web/web.go | 2 + 11 files changed, 586 insertions(+), 34 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 03c20dc52d..3c2b5ee0c1 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -275,6 +275,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { case "old-ui": c.web.UseOldUI = true logger.Info("Serving previous version of the Prometheus web UI.") + case "otlp-deltatocumulative": + c.web.ConvertOTLPDelta = true + logger.Info("Converting delta OTLP metrics to cumulative") default: logger.Warn("Unknown option for --enable-feature", "option", o) } @@ -516,7 +519,7 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. 
See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index dd207dc382..9b4ec8b736 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -60,7 +60,7 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature ... | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --enable-feature ... | Comma separated feature names to enable. 
Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --agent | Run Prometheus in 'Agent mode'. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` | diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 67f31c260f..0de3d2bf7d 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -151,3 +151,25 @@ Configuration reloads are triggered by detecting changes in the checksum of the main configuration file or any referenced files, such as rule and scrape configurations. To ensure consistency and avoid issues during reloads, it's recommended to update these files atomically. + +## OTLP Delta Conversion + +`--enable-feature=otlp-deltatocumulative` + +When enabled, Prometheus will convert OTLP metrics from delta temporality to their +cumulative equivalent, instead of dropping them. + +This uses +[deltatocumulative][d2c] +from the OTel collector, using its default settings. + +Delta conversion keeps in-memory state to aggregate delta changes per-series over time. +When Prometheus restarts, this state is lost, starting the aggregation from zero +again. This results in a counter reset in the cumulative series. + +This state is periodically ([`max_stale`][d2c]) cleared of inactive series. + +Enabling this _can_ have negative impact on performance, because the in-memory +state is mutex guarded. Cumulative-only OTLP requests are not affected. 
+ +[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor diff --git a/docs/querying/api.md b/docs/querying/api.md index e3f97886dc..1366dc02c2 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -1424,6 +1424,15 @@ endpoint is `/api/v1/otlp/v1/metrics`. *New in v2.47* +### OTLP Delta + +Prometheus can convert incoming metrics from delta temporality to their cumulative equivalent. +This is done using [deltatocumulative](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor) from the OpenTelemetry Collector. + +To enable, pass `--enable-feature=otlp-deltatocumulative`. + +*New in v3.2* + ## Notifications The following endpoints provide information about active status notifications concerning the Prometheus server itself. diff --git a/go.mod b/go.mod index 93cd89f469..98075a56fa 100644 --- a/go.mod +++ b/go.mod @@ -48,6 +48,7 @@ require ( github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.5 @@ -60,7 +61,10 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 + go.opentelemetry.io/collector/component v0.116.0 + go.opentelemetry.io/collector/consumer v1.22.0 go.opentelemetry.io/collector/pdata v1.22.0 + go.opentelemetry.io/collector/processor v0.116.0 go.opentelemetry.io/collector/semconv v0.116.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 @@ -68,6 +72,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 + go.opentelemetry.io/otel/metric v1.33.0 go.opentelemetry.io/otel/sdk v1.33.0 go.opentelemetry.io/otel/trace v1.33.0 go.uber.org/atomic v1.11.0 @@ -168,6 +173,8 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect @@ -184,8 +191,10 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.116.0 // indirect + go.opentelemetry.io/collector/pipeline v0.116.0 // indirect go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.22.0 // indirect diff --git a/go.sum b/go.sum index 515fb9c053..009b714060 100644 --- a/go.sum +++ b/go.sum @@ -165,6 +165,8 @@ github.com/go-resty/resty/v2 v2.16.2/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWa github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.2.1 
h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= @@ -304,6 +306,12 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -348,6 +356,8 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= 
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= @@ -355,6 +365,8 @@ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -383,6 +395,14 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -490,8 +510,36 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.116.0 h1:SQE1YeVfYCN7bw1n4hknUwJE5U/1qJL552sDhAdSlaA= +go.opentelemetry.io/collector/component v0.116.0/go.mod h1:MYgXFZWDTq0uPgF1mkLSFibtpNqksRVAOrmihckOQEs= +go.opentelemetry.io/collector/component/componentstatus v0.116.0 h1:wpgY0H2K9IPBzaNAvavKziK86VZ7TuNFQbS9OC4Z6Cs= +go.opentelemetry.io/collector/component/componentstatus v0.116.0/go.mod h1:ZRlVwHFMGNfcsAywEJqivOn5JzDZkpe3KZVSwMWu4tw= +go.opentelemetry.io/collector/component/componenttest v0.116.0 
h1:UIcnx4Rrs/oDRYSAZNHRMUiYs2FBlwgV5Nc0oMYfR6A= +go.opentelemetry.io/collector/component/componenttest v0.116.0/go.mod h1:W40HaKPHdBFMVI7zzHE7dhdWC+CgAnAC9SmWetFBATY= +go.opentelemetry.io/collector/config/configtelemetry v0.116.0 h1:Vl49VCHQwBOeMswDpFwcl2HD8e9y94xlrfII3SR2VeQ= +go.opentelemetry.io/collector/config/configtelemetry v0.116.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w= +go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.22.0 h1:QmfnNizyNZFt0uK3GG/EoT5h6PvZJ0dgVTc5hFEc1l0= +go.opentelemetry.io/collector/consumer v1.22.0/go.mod h1:tiz2khNceFAPokxxfzAuFfIpShBasMT2AL2Sbc7+m0I= +go.opentelemetry.io/collector/consumer/consumertest v0.116.0 h1:pIVR7FtQMNAzfxBUSMEIC2dX5Lfo3O9ZBfx+sAwrrrM= +go.opentelemetry.io/collector/consumer/consumertest v0.116.0/go.mod h1:cV3cNDiPnls5JdhnOJJFVlclrClg9kPs04cXgYP9Gmk= +go.opentelemetry.io/collector/consumer/xconsumer v0.116.0 h1:ZrWvq7HumB0jRYmS2ztZ3hhXRNpUVBWPKMbPhsVGmZM= +go.opentelemetry.io/collector/consumer/xconsumer v0.116.0/go.mod h1:C+VFMk8vLzPun6XK8aMts6h4RaDjmzXHCPaiOxzRQzQ= go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0 h1:iE6lqkO7Hi6lTIIml1RI7yQ55CKqW12R2qHinwF5Zuk= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0/go.mod h1:xQiPpjzIiXRFb+1fPxUy/3ygEZgo0Bu/xmLKOWu8vMQ= +go.opentelemetry.io/collector/pdata/testdata v0.116.0 h1:zmn1zpeX2BvzL6vt2dBF4OuAyFF2ml/OXcqflNgFiP0= +go.opentelemetry.io/collector/pdata/testdata v0.116.0/go.mod h1:ytWzICFN4XTDP6o65B4+Ed52JGdqgk9B8CpLHCeCpMo= +go.opentelemetry.io/collector/pipeline v0.116.0 h1:o8eKEuWEszmRpfShy7ElBoQ3Jo6kCi9ucm3yRgdNb9s= +go.opentelemetry.io/collector/pipeline 
v0.116.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.116.0 h1:Kyu4tPzTdWNHtZjcxvI/bGNAgyv8L8Kem2r/Mk4IDAw= +go.opentelemetry.io/collector/processor v0.116.0/go.mod h1:+/Ugy48RAxlZEXmN2cw51W8t5wdHS9No+GAoP+moskk= +go.opentelemetry.io/collector/processor/processortest v0.116.0 h1:+IqNEVEE0E2MsO2g7+Y/9dz35sDuvAXRXrLts9NdXrA= +go.opentelemetry.io/collector/processor/processortest v0.116.0/go.mod h1:DLaQDBxzgeeaUO0ULMn/efos9PmHZkmYCHuxwCsiVHI= +go.opentelemetry.io/collector/processor/xprocessor v0.116.0 h1:iin/UwuWvSLB7ZNfINFUYbZ5lxIi1NjZ2brkyyFdiRA= +go.opentelemetry.io/collector/processor/xprocessor v0.116.0/go.mod h1:cnA43/XpKDbaOmd8buqKp/LGJ2l/OoCqbR//u5DMfn8= go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI= @@ -510,8 +558,8 @@ go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5W go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= 
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= @@ -524,6 +572,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 89433ae6f2..02585539c0 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -38,6 +38,13 @@ import ( writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" + + deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/otel/metric/noop" ) type writeHandler struct { @@ -517,56 +524,107 @@ func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref stora return ref, err } +type OTLPOptions struct { + // Convert delta samples to their cumulative equivalent by aggregating in-memory + ConvertDelta bool +} + // NewOTLPWriteHandler creates a 
http.Handler that accepts OTLP write requests and // writes them to the provided appendable. -func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { - rwHandler := &writeHandler{ - logger: logger, - appendable: appendable, +func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { + ex := &rwExporter{ + writeHandler: &writeHandler{ + logger: logger, + appendable: appendable, + }, + config: configFunc, } - return &otlpWriteHandler{ - logger: logger, - rwHandler: rwHandler, - configFunc: configFunc, + wh := &otlpWriteHandler{logger: logger, cumul: ex} + + if opts.ConvertDelta { + fac := deltatocumulative.NewFactory() + set := processor.Settings{TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}} + d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.cumul) + if err != nil { + // fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor], + // which only errors if: + // - cfg.(type) != *Config + // - telemetry.New fails due to bad set.TelemetrySettings + // + // both cannot be the case, as we pass a valid *Config and valid TelemetrySettings. + // as such, we assume this error to never occur. + // if it is, our assumptions are broken in which case a panic seems acceptable. + panic(err) + } + if err := d2c.Start(context.Background(), nil); err != nil { + // deltatocumulative does not error on start. 
see above for panic reasoning + panic(err) + } + wh.delta = d2c } + + return wh } -type otlpWriteHandler struct { - logger *slog.Logger - rwHandler *writeHandler - configFunc func() config.Config +type rwExporter struct { + *writeHandler + config func() config.Config } -func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - req, err := DecodeOTLPWriteRequest(r) - if err != nil { - h.logger.Error("Error decoding remote write request", "err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - otlpCfg := h.configFunc().OTLPConfig +func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { + otlpCfg := rw.config().OTLPConfig converter := otlptranslator.NewPrometheusConverter() - annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{ + annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{ AddMetricSuffixes: true, AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, }) if err != nil { - h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) + rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) } ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { - h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } - err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ + err = rw.write(ctx, &prompb.WriteRequest{ Timeseries: converter.TimeSeries(), Metadata: converter.Metadata(), }) + return err +} + +func (rw *rwExporter) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: false} +} + +type otlpWriteHandler struct { + logger *slog.Logger + + 
cumul consumer.Metrics // only cumulative + delta consumer.Metrics // delta capable +} + +func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + req, err := DecodeOTLPWriteRequest(r) + if err != nil { + h.logger.Error("Error decoding OTLP write request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + md := req.Metrics() + // if delta conversion enabled AND delta samples exist, use slower delta capable path + if h.delta != nil && hasDelta(md) { + err = h.delta.ConsumeMetrics(r.Context(), md) + } else { + // deltatocumulative currently holds a sync.Mutex when entering ConsumeMetrics. + // This is slow and not necessary when no delta samples exist anyways + err = h.cumul.ConsumeMetrics(r.Context(), md) + } switch { case err == nil: @@ -583,6 +641,31 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } +func hasDelta(md pmetric.Metrics) bool { + for i := range md.ResourceMetrics().Len() { + sms := md.ResourceMetrics().At(i).ScopeMetrics() + for i := range sms.Len() { + ms := sms.At(i).Metrics() + for i := range ms.Len() { + temporality := pmetric.AggregationTemporalityUnspecified + m := ms.At(i) + switch ms.At(i).Type() { + case pmetric.MetricTypeSum: + temporality = m.Sum().AggregationTemporality() + case pmetric.MetricTypeExponentialHistogram: + temporality = m.ExponentialHistogram().AggregationTemporality() + case pmetric.MetricTypeHistogram: + temporality = m.Histogram().AggregationTemporality() + } + if temporality == pmetric.AggregationTemporalityDelta { + return true + } + } + } + } + return false +} + type timeLimitAppender struct { storage.Appender diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 83dfffbaef..8125da7f6e 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -15,13 +15,23 @@ package remote import ( "bytes" + "context" "errors" + "fmt" + "log/slog" + "math/rand/v2" 
"net/http" "net/http/httptest" "net/url" + "os" + "reflect" + "runtime" + "strconv" + "sync" "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -31,8 +41,10 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage" ) func testRemoteWriteConfig() *config.RemoteWriteConfig { @@ -379,11 +391,11 @@ func TestOTLPWriteHandler(t *testing.T) { req.Header.Set("Content-Type", "application/x-protobuf") appendable := &mockAppendable{} - handler := NewOTLPWriteHandler(nil, appendable, func() config.Config { + handler := NewOTLPWriteHandler(nil, nil, appendable, func() config.Config { return config.Config{ OTLPConfig: config.DefaultOTLPConfig, } - }) + }, OTLPOptions{}) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -476,3 +488,364 @@ func generateOTLPWriteRequest() pmetricotlp.ExportRequest { return pmetricotlp.NewExportRequestFromMetrics(d) } + +func TestOTLPDelta(t *testing.T) { + log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) + appendable := &mockAppendable{} + cfg := func() config.Config { + return config.Config{OTLPConfig: config.DefaultOTLPConfig} + } + handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true}) + + md := pmetric.NewMetrics() + ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() + + m := ms.AppendEmpty() + m.SetName("some.delta.total") + + sum := m.SetEmptySum() + sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + + ts := time.Date(2000, 1, 2, 3, 4, 0, 0, time.UTC) + for i := range 3 { + dp := sum.DataPoints().AppendEmpty() + 
dp.SetIntValue(int64(i)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts.Add(time.Duration(i) * time.Second))) + } + + proto, err := pmetricotlp.NewExportRequestFromMetrics(md).MarshalProto() + require.NoError(t, err) + + req, err := http.NewRequest("", "", bytes.NewReader(proto)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/x-protobuf") + + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Result().StatusCode) + + ls := labels.FromStrings("__name__", "some_delta_total") + milli := func(sec int) int64 { + return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli() + } + + want := []mockSample{ + {t: milli(0), l: ls, v: 0}, // +0 + {t: milli(1), l: ls, v: 1}, // +1 + {t: milli(2), l: ls, v: 3}, // +2 + } + if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(_ reflect.Type) bool { return true })); diff != "" { + t.Fatal(diff) + } +} + +func BenchmarkOTLP(b *testing.B) { + start := time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC) + + type Type struct { + name string + data func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric + } + types := []Type{{ + name: "sum", + data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric { + cumul := make(map[int]float64) + return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric { + m := pmetric.NewMetric() + sum := m.SetEmptySum() + sum.SetAggregationTemporality(mode) + dps := sum.DataPoints() + for id := range dpc { + dp := dps.AppendEmpty() + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute))) + dp.Attributes().PutStr("id", strconv.Itoa(id)) + v := float64(rand.IntN(100)) / 10 + switch mode { + case pmetric.AggregationTemporalityDelta: + dp.SetDoubleValue(v) + case pmetric.AggregationTemporalityCumulative: + cumul[id] += v + dp.SetDoubleValue(cumul[id]) + } + } + 
return []pmetric.Metric{m} + } + }(), + }, { + name: "histogram", + data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric { + bounds := [4]float64{1, 10, 100, 1000} + type state struct { + counts [4]uint64 + count uint64 + sum float64 + } + var cumul []state + return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric { + if cumul == nil { + cumul = make([]state, dpc) + } + m := pmetric.NewMetric() + hist := m.SetEmptyHistogram() + hist.SetAggregationTemporality(mode) + dps := hist.DataPoints() + for id := range dpc { + dp := dps.AppendEmpty() + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute))) + dp.Attributes().PutStr("id", strconv.Itoa(id)) + dp.ExplicitBounds().FromRaw(bounds[:]) + + var obs *state + switch mode { + case pmetric.AggregationTemporalityDelta: + obs = new(state) + case pmetric.AggregationTemporalityCumulative: + obs = &cumul[id] + } + + for i := range obs.counts { + v := uint64(rand.IntN(10)) + obs.counts[i] += v + obs.count++ + obs.sum += float64(v) + } + + dp.SetCount(obs.count) + dp.SetSum(obs.sum) + dp.BucketCounts().FromRaw(obs.counts[:]) + } + return []pmetric.Metric{m} + } + }(), + }, { + name: "exponential", + data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric { + type state struct { + counts [4]uint64 + count uint64 + sum float64 + } + var cumul []state + return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric { + if cumul == nil { + cumul = make([]state, dpc) + } + m := pmetric.NewMetric() + ex := m.SetEmptyExponentialHistogram() + ex.SetAggregationTemporality(mode) + dps := ex.DataPoints() + for id := range dpc { + dp := dps.AppendEmpty() + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute))) + 
dp.Attributes().PutStr("id", strconv.Itoa(id)) + dp.SetScale(2) + + var obs *state + switch mode { + case pmetric.AggregationTemporalityDelta: + obs = new(state) + case pmetric.AggregationTemporalityCumulative: + obs = &cumul[id] + } + + for i := range obs.counts { + v := uint64(rand.IntN(10)) + obs.counts[i] += v + obs.count++ + obs.sum += float64(v) + } + + dp.Positive().BucketCounts().FromRaw(obs.counts[:]) + dp.SetCount(obs.count) + dp.SetSum(obs.sum) + } + + return []pmetric.Metric{m} + } + }(), + }} + + modes := []struct { + name string + data func(func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, int) []pmetric.Metric + }{{ + name: "cumulative", + data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric { + return data(pmetric.AggregationTemporalityCumulative, 10, epoch) + }, + }, { + name: "delta", + data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric { + return data(pmetric.AggregationTemporalityDelta, 10, epoch) + }, + }, { + name: "mixed", + data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric { + cumul := data(pmetric.AggregationTemporalityCumulative, 5, epoch) + delta := data(pmetric.AggregationTemporalityDelta, 5, epoch) + out := append(cumul, delta...) 
+ rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] }) + return out + }, + }} + + configs := []struct { + name string + opts OTLPOptions + }{ + {name: "default"}, + {name: "convert", opts: OTLPOptions{ConvertDelta: true}}, + } + + Workers := runtime.GOMAXPROCS(0) + for _, cs := range types { + for _, mode := range modes { + for _, cfg := range configs { + b.Run(fmt.Sprintf("type=%s/temporality=%s/cfg=%s", cs.name, mode.name, cfg.name), func(b *testing.B) { + if !cfg.opts.ConvertDelta && (mode.name == "delta" || mode.name == "mixed") { + b.Skip("not possible") + } + + var total int + + // reqs is a [b.N]*http.Request, divided across the workers. + // deltatocumulative requires timestamps to be strictly in + // order on a per-series basis. to ensure this, each reqs[k] + // contains samples of differently named series, sorted + // strictly in time order + reqs := make([][]*http.Request, Workers) + for n := range b.N { + k := n % Workers + + md := pmetric.NewMetrics() + ms := md.ResourceMetrics().AppendEmpty(). + ScopeMetrics().AppendEmpty(). 
+ Metrics() + + for i, m := range mode.data(cs.data, n) { + m.SetName(fmt.Sprintf("benchmark_%d_%d", k, i)) + m.MoveTo(ms.AppendEmpty()) + } + + total += sampleCount(md) + + ex := pmetricotlp.NewExportRequestFromMetrics(md) + data, err := ex.MarshalProto() + require.NoError(b, err) + + req, err := http.NewRequest("", "", bytes.NewReader(data)) + require.NoError(b, err) + req.Header.Set("Content-Type", "application/x-protobuf") + + reqs[k] = append(reqs[k], req) + } + + log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})) + mock := new(mockAppendable) + appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)} + cfgfn := func() config.Config { + return config.Config{OTLPConfig: config.DefaultOTLPConfig} + } + handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts) + + fail := make(chan struct{}) + done := make(chan struct{}) + + b.ResetTimer() + b.ReportAllocs() + + // we use multiple workers to mimic a real-world scenario + // where multiple OTel collectors are sending their + // time-series in parallel. 
+ // this is necessary to exercise potential lock-contention + // in this benchmark + for k := range Workers { + go func() { + rec := httptest.NewRecorder() + for _, req := range reqs[k] { + handler.ServeHTTP(rec, req) + if rec.Result().StatusCode != http.StatusOK { + fail <- struct{}{} + return + } + } + done <- struct{}{} + }() + } + + for range Workers { + select { + case <-fail: + b.FailNow() + case <-done: + } + } + + require.Equal(b, total, len(mock.samples)+len(mock.histograms)) + }) + } + } + } +} + +func sampleCount(md pmetric.Metrics) int { + var total int + rms := md.ResourceMetrics() + for i := range rms.Len() { + sms := rms.At(i).ScopeMetrics() + for i := range sms.Len() { + ms := sms.At(i).Metrics() + for i := range ms.Len() { + m := ms.At(i) + switch m.Type() { + case pmetric.MetricTypeSum: + total += m.Sum().DataPoints().Len() + case pmetric.MetricTypeGauge: + total += m.Gauge().DataPoints().Len() + case pmetric.MetricTypeHistogram: + dps := m.Histogram().DataPoints() + for i := range dps.Len() { + total += dps.At(i).BucketCounts().Len() + total++ // le=+Inf series + total++ // _sum series + total++ // _count series + } + case pmetric.MetricTypeExponentialHistogram: + total += m.ExponentialHistogram().DataPoints().Len() + case pmetric.MetricTypeSummary: + total += m.Summary().DataPoints().Len() + } + } + } + } + return total +} + +type syncAppendable struct { + lock sync.Locker + storage.Appendable +} + +type syncAppender struct { + lock sync.Locker + storage.Appender +} + +func (s syncAppendable) Appender(ctx context.Context) storage.Appender { + return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock} +} + +func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + s.lock.Lock() + defer s.lock.Unlock() + return s.Appender.Append(ref, l, t, v) +} + +func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f 
*histogram.FloatHistogram) (storage.SeriesRef, error) { + s.lock.Lock() + defer s.lock.Unlock() + return s.Appender.AppendHistogram(ref, l, t, h, f) +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index ea7d5c5fe4..9821b26ec2 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -259,7 +259,7 @@ func NewAPI( statsRenderer StatsRenderer, rwEnabled bool, acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg, - otlpEnabled bool, + otlpEnabled, otlpDeltaToCumulative bool, ctZeroIngestionEnabled bool, ) *API { a := &API{ @@ -307,7 +307,7 @@ func NewAPI( a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled) } if otlpEnabled { - a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc) + a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative}) } return a diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 0a5c76b48e..6ff0614c4c 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -143,6 +143,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}, false, false, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/web/web.go b/web/web.go index b3de603fb1..246108e5b1 100644 --- a/web/web.go +++ b/web/web.go @@ -289,6 +289,7 @@ type Options struct { RemoteReadBytesInFrame int EnableRemoteWriteReceiver bool EnableOTLPWriteReceiver bool + ConvertOTLPDelta bool IsAgent bool CTZeroIngestionEnabled bool AppName string @@ -387,6 +388,7 @@ func New(logger *slog.Logger, o *Options) *Handler { o.EnableRemoteWriteReceiver, o.AcceptRemoteWriteProtoMsgs, o.EnableOTLPWriteReceiver, + o.ConvertOTLPDelta, o.CTZeroIngestionEnabled, ) From 594b882e8b3aef70b6cc3ff44fc4ccdb8cf94c99 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Tue, 14 Jan 2025 18:44:00 +0100 Subject: [PATCH 1255/1397] fix(remotewrite2): do not overwrite help text with unit in enqueue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While testing POC for https://github.com/grafana/mimir/issues/9072 I saw no unit or help metadata. Our test env: https://github.com/grafana/mimir/tree/main/development/mimir-monolithic-mode doesn't have units, so that was empty and cleared the help due to this bug. Signed-off-by: György Krajcsovits --- storage/remote/queue_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 4b966059f6..f0ea33fb52 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1923,7 +1923,7 @@ func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, if d.metadata != nil { pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type) pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help) - pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Unit) + pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit) nPendingMetadata++ } From b3a9fa3df91b9663a26f6393b726d19a0c12a37f Mon Sep 17 00:00:00 2001 From: dongjiang Date: Wed, 15 Jan 2025 02:22:22 +0800 Subject: [PATCH 1256/1397] chore: Upgrade to golangci-lint v1.63.4 (#15799) update golangci-lint and enable more lntiers This updates golangci-lint to v1.63.4 and enables linters `nilnesserr` and `exptostd` Signed-off-by: dongjiang --------- Signed-off-by: dongjiang --- .github/workflows/ci.yml | 2 +- .golangci.yml | 7 +++++-- Makefile.common | 2 +- scripts/golangci-lint.yml | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e96f77c114..3bf12b07c4 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -195,7 +195,7 @@ jobs: with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. - version: v1.62.0 + version: v1.63.4 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/.golangci.yml b/.golangci.yml index dfc74139f9..f294a4d76e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,25 +5,28 @@ output: sort-results: true linters: + # Keep this list sorted alphabetically enable: - depguard - errorlint + - exptostd - gocritic - godot - gofumpt - goimports + - loggercheck - misspell + - nilnesserr - nolintlint - perfsprint - predeclared - revive + - sloglint - testifylint - unconvert - unused - usestdlibvars - whitespace - - loggercheck - - sloglint issues: max-issues-per-linter: 0 diff --git a/Makefile.common b/Makefile.common index fc47bdbb21..d1576bb313 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.62.0 +GOLANGCI_LINT_VERSION ?= v1.63.4 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 01b943b9b5..0c00c410a4 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -36,4 +36,4 @@ jobs: uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: args: --verbose - version: v1.62.0 + version: v1.63.4 From b753629c391b54724a977424aa8cc546c7cf3445 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98rjan=20Ommundsen?= Date: Wed, 15 Jan 2025 00:58:47 +0100 Subject: [PATCH 1257/1397] replace use of 'cluster' with clusterLabel variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ørjan Ommundsen --- .../prometheus-mixin/dashboards.libsonnet | 136 +++++++++--------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index 22b8c92e6e..ae927c31a9 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -56,7 +56,7 @@ local row = panel.row; + variable.query.selectionOptions.withIncludeAll(true, '.+') + variable.query.selectionOptions.withMulti(true) + if showMultiCluster then - variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{cluster=~"$cluster"}') + variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config) else variable.query.queryTypes.withLabelValues('job', metric='prometheus_build_info{%(prometheusSelector)s}' % $._config) ; @@ -70,7 +70,7 @@ local row = panel.row; + variable.query.selectionOptions.withIncludeAll(true, '.+') + variable.query.selectionOptions.withMulti(true) + if showMultiCluster then - variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster", job=~"$job"}') + 
variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job"}' % $._config) else variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{job=~"$job"}') ; @@ -121,14 +121,14 @@ local row = panel.row; panel.table.queryOptions.withTargets([ prometheus.new( '$datasource', - 'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})' + 'count by (cluster, job, instance, version) (prometheus_build_info{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config ) + prometheus.withFormat('table') + prometheus.withInstant(true) + prometheus.withLegendFormat(''), prometheus.new( '$datasource', - 'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})' + 'max by (cluster, job, instance) (time() - process_start_time_seconds{%(clusterLabel)s=~"$cluster", job=~"$job", instance=~"$instance"})' % $._config ) + prometheus.withFormat('table') + prometheus.withInstant(true) @@ -163,10 +163,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3' + 'sum(rate(prometheus_target_sync_length_seconds_sum{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (%(clusterLabel)s, job, scrape_job, instance) * 1e3' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}:{{scrape_job}}'), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -190,10 +190,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'sum by 
(cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})' + 'sum by (%(clusterLabel)s, job, instance) (prometheus_sd_discovered_targets{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"})' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}'), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -216,10 +216,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' + 'rate(prometheus_target_interval_length_seconds_sum{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}}:{{job}}:{{instance}} {{interval}} configured'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}} {{interval}} configured'), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -243,34 +243,34 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('exceeded body size limit: 
{{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('exceeded body size limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('exceeded sample limit: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('exceeded sample limit: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('duplicate timestamp: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('duplicate timestamp: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('out of bounds: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('out of bounds: {{%(clusterLabel)s}} {{job}} {{instance}}' % 
$._config), prometheus.new( '$datasource', - 'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' + 'sum by (%(clusterLabel)s, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('out of order: {{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('out of order: {{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -318,10 +318,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' + 'rate(prometheus_tsdb_head_samples_appended_total{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m])' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -345,10 +345,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + 'prometheus_tsdb_head_series{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head series'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head series' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -372,10 +372,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 
'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}' + 'prometheus_tsdb_head_chunks{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}} head chunks'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}} head chunks' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -399,10 +399,10 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' + 'rate(prometheus_engine_query_duration_seconds_count{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{cluster}} {{job}} {{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}} {{job}} {{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -426,7 +426,7 @@ local row = panel.row; panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' + 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3' % $._config ) + prometheus.withFormat('time_series') + prometheus.withLegendFormat('{{slice}}'), @@ -514,7 +514,7 @@ local row = panel.row; + variable.query.withDatasourceFromVariable(datasourceVariable) + variable.query.refresh.onTime() + variable.query.selectionOptions.withIncludeAll(true) - + variable.query.queryTypes.withLabelValues('instance', metric='prometheus_build_info{cluster=~"$cluster"}') + + variable.query.queryTypes.withLabelValues('instance', 
metric='prometheus_build_info{%(clusterLabel)s=~"$cluster"}' % $._config) ; local urlVariable = @@ -522,7 +522,7 @@ local row = panel.row; + variable.query.withDatasourceFromVariable(datasourceVariable) + variable.query.refresh.onTime() + variable.query.selectionOptions.withIncludeAll(true) - + variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}') + + variable.query.queryTypes.withLabelValues('url', metric='prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config) ; local timestampComparison = @@ -534,15 +534,15 @@ local row = panel.row; '$datasource', ||| ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"} + prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"} - - ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0) + ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} != 0) ) - ||| + ||| % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}::{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local timestampComparisonRate = @@ -554,15 +554,15 @@ local row = panel.row; '$datasource', ||| clamp_min( - rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m]) + rate(prometheus_remote_storage_highest_timestamp_in_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m]) - - ignoring (remote_name, url) group_right(instance) 
rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) + ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) , 0) - ||| + ||| % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local samplesRate = @@ -574,16 +574,16 @@ local row = panel.row; '$datasource', ||| rate( - prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m]) + prometheus_remote_storage_samples_in_total{%(clusterLabel)s=~"$cluster", instance=~"$instance"}[5m]) - - ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - - (rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) - ||| + (rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])) + ||| % 
$._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local currentShards = @@ -593,11 +593,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local maxShards = @@ -607,11 +607,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards_max{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local minShards = @@ -621,11 +621,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards_min{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + 
prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local desiredShards = @@ -635,11 +635,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shards_desired{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local shardsCapacity = @@ -649,11 +649,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_shard_capacity{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local pendingSamples = @@ -663,11 +663,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}' + 'prometheus_remote_storage_pending_samples{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"} or
prometheus_remote_storage_samples_pending{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local walSegment = @@ -679,11 +679,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}' + 'prometheus_tsdb_wal_segment_current{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}}' % $._config), ]); local queueSegment = @@ -695,11 +695,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}' + 'prometheus_wal_watcher_current_segment{%(clusterLabel)s=~"$cluster", instance=~"$instance"}' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{consumer}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{consumer}}' % $._config), ]); local droppedSamples = @@ -710,11 +710,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_dropped_samples_total{%(clusterLabel)s=~"$cluster", 
instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local failedSamples = @@ -725,11 +725,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_failed_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local retriedSamples = @@ -740,11 +740,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_retried_samples_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", 
url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); local enqueueRetries = @@ -755,11 +755,11 @@ local row = panel.row; + panel.timeSeries.queryOptions.withTargets([ prometheus.new( '$datasource', - 'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' + 'rate(prometheus_remote_storage_enqueue_retries_total{%(clusterLabel)s=~"$cluster", instance=~"$instance", url=~"$url"}[5m])' % $._config ) + prometheus.withFormat('time_series') + prometheus.withIntervalFactor(2) - + prometheus.withLegendFormat('{{cluster}}:{{instance}} {{remote_name}}:{{url}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{instance}} {{remote_name}}:{{url}}' % $._config), ]); dashboard.new('%(prefix)sRemote Write' % $._config.grafanaPrometheus) From 74edb1ff74d1eb27d12e40f018cc93c693b0a3b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98rjan=20Ommundsen?= Date: Wed, 15 Jan 2025 01:28:56 +0100 Subject: [PATCH 1258/1397] smol fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ørjan Ommundsen --- documentation/prometheus-mixin/dashboards.libsonnet | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index ae927c31a9..50cbf72d1d 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -166,7 +166,7 @@ local row = panel.row; 'sum(rate(prometheus_target_sync_length_seconds_sum{%(clusterLabel)s=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (%(clusterLabel)s, job, scrape_job, instance) * 1e3' % $._config ) + 
prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}:{{scrape_job}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}:{{scrape_job}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -193,7 +193,7 @@ local row = panel.row; 'sum by (%(clusterLabel)s, job, instance) (prometheus_sd_discovered_targets{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"})' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}}' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ @@ -219,7 +219,7 @@ local row = panel.row; 'rate(prometheus_target_interval_length_seconds_sum{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{%(clusterLabel)s=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3' % $._config ) + prometheus.withFormat('time_series') - + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}} {{interval}} configured'), + + prometheus.withLegendFormat('{{%(clusterLabel)s}}:{{job}}:{{instance}} {{interval}} configured' % $._config), ]) else panel.timeSeries.queryOptions.withTargets([ From 5df6ea30420d94eec1f8f247367767053aabcb3c Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Wed, 15 Jan 2025 08:45:05 +0100 Subject: [PATCH 1259/1397] promtool: Support linting of scrape interval (#15719) * PromTool: Support Scrape Interval Lint Checking --------- Signed-off-by: zhaowang Signed-off-by: Arve Knudsen Co-authored-by: zhaowang --- CHANGELOG.md | 2 + cmd/promtool/main.go | 150 ++++++++++++------ cmd/promtool/main_test.go | 61 +++++-- ...s-config.lint.too_long_scrape_interval.yml | 3 + docs/command-line/promtool.md | 9 +- 5 files changed, 167 insertions(+), 58 deletions(-) create mode 100644 
cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index d0a7ef6611..40be59f724 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## unreleased +* [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719 + ## 3.1.0 / 2025-01-02 * [SECURITY] upgrade golang.org/x/crypto to address reported CVE-2024-45337. #15691 diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 62a1d4f906..54292bbfe5 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -73,14 +73,19 @@ const ( // Exit code 3 is used for "one or more lint issues detected". lintErrExitCode = 3 - lintOptionAll = "all" - lintOptionDuplicateRules = "duplicate-rules" - lintOptionNone = "none" - checkHealth = "/-/healthy" - checkReadiness = "/-/ready" + lintOptionAll = "all" + lintOptionDuplicateRules = "duplicate-rules" + lintOptionTooLongScrapeInterval = "too-long-scrape-interval" + lintOptionNone = "none" + checkHealth = "/-/healthy" + checkReadiness = "/-/ready" ) -var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone} +var ( + lintRulesOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone} + // Same as lintRulesOptions, but including scrape config linting options as well. 
+ lintConfigOptions = append(append([]string{}, lintRulesOptions...), lintOptionTooLongScrapeInterval) +) func main() { var ( @@ -97,6 +102,10 @@ func main() { app.HelpFlag.Short('h') checkCmd := app.Command("check", "Check the resources for validity.") + checkLookbackDelta := checkCmd.Flag( + "query.lookback-delta", + "The server's maximum query lookback duration.", + ).Default("5m").Duration() experimental := app.Flag("experimental", "Enable experimental commands.").Bool() @@ -113,7 +122,7 @@ func main() { checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool() checkConfigLint := checkConfigCmd.Flag( "lint", - "Linting checks to apply to the rules specified in the config. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting", + "Linting checks to apply to the rules/scrape configs specified in the config. Available options are: "+strings.Join(lintConfigOptions, ", ")+". Use --lint=none to disable linting", ).Default(lintOptionDuplicateRules).String() checkConfigLintFatal := checkConfigCmd.Flag( "lint-fatal", @@ -140,7 +149,7 @@ func main() { ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", - "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting", + "Linting checks to apply. Available options are: "+strings.Join(lintRulesOptions, ", ")+". 
Use --lint=none to disable linting", ).Default(lintOptionDuplicateRules).String() checkRulesLintFatal := checkRulesCmd.Flag( "lint-fatal", @@ -339,7 +348,7 @@ func main() { os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): - os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) + os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, model.Duration(*checkLookbackDelta)), *configFiles...)) case checkServerHealthCmd.FullCommand(): os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper))) @@ -351,7 +360,7 @@ func main() { os.Exit(CheckWebConfig(*webConfigFiles...)) case checkRulesCmd.FullCommand(): - os.Exit(CheckRules(newLintConfig(*checkRulesLint, *checkRulesLintFatal), *ruleFiles...)) + os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal), *ruleFiles...)) case checkMetricsCmd.FullCommand(): os.Exit(CheckMetrics(*checkMetricsExtended)) @@ -445,15 +454,15 @@ func checkExperimental(f bool) { var errLint = errors.New("lint error") -type lintConfig struct { +type rulesLintConfig struct { all bool duplicateRules bool fatal bool } -func newLintConfig(stringVal string, fatal bool) lintConfig { +func newRulesLintConfig(stringVal string, fatal bool) rulesLintConfig { items := strings.Split(stringVal, ",") - ls := lintConfig{ + ls := rulesLintConfig{ fatal: fatal, } for _, setting := range items { @@ -464,16 +473,57 @@ func newLintConfig(stringVal string, fatal bool) lintConfig { ls.duplicateRules = true case lintOptionNone: default: - fmt.Printf("WARNING: unknown lint option %s\n", setting) + fmt.Printf("WARNING: unknown lint option: %q\n", setting) } } return ls } -func (ls lintConfig) lintDuplicateRules() bool { +func (ls rulesLintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } +type 
configLintConfig struct { + rulesLintConfig + + lookbackDelta model.Duration +} + +func newConfigLintConfig(optionsStr string, fatal bool, lookbackDelta model.Duration) configLintConfig { + c := configLintConfig{ + rulesLintConfig: rulesLintConfig{ + fatal: fatal, + }, + } + + lintNone := false + var rulesOptions []string + for _, option := range strings.Split(optionsStr, ",") { + switch option { + case lintOptionAll, lintOptionTooLongScrapeInterval: + c.lookbackDelta = lookbackDelta + if option == lintOptionAll { + rulesOptions = append(rulesOptions, lintOptionAll) + } + case lintOptionNone: + lintNone = true + default: + rulesOptions = append(rulesOptions, option) + } + } + + if lintNone { + c.lookbackDelta = 0 + rulesOptions = nil + } + + if len(rulesOptions) > 0 { + c.rulesLintConfig = newRulesLintConfig(strings.Join(rulesOptions, ","), fatal) + } + + return c +} + // CheckServerStatus - healthy & ready. func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { if serverURL.Scheme == "" { @@ -512,12 +562,12 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht } // CheckConfig validates configuration files. 
-func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int { +func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, files ...string) int { failed := false hasErrors := false for _, f := range files { - ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly) + ruleFiles, scrapeConfigs, err := checkConfig(agentMode, f, checkSyntaxOnly) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) hasErrors = true @@ -530,12 +580,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files } fmt.Println() - rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings) - if rulesFailed { - failed = rulesFailed - } - if rulesHasErrors { - hasErrors = rulesHasErrors + if !checkSyntaxOnly { + scrapeConfigsFailed := lintScrapeConfigs(scrapeConfigs, lintSettings) + failed = failed || scrapeConfigsFailed + rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig) + failed = failed || rulesFailed + hasErrors = hasErrors || rulesHaveErrors } } if failed && hasErrors { @@ -574,12 +624,12 @@ func checkFileExists(fn string) error { return err } -func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) { +func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, []*config.ScrapeConfig, error) { fmt.Println("Checking", filename) cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger()) if err != nil { - return nil, err + return nil, nil, err } var ruleFiles []string @@ -587,15 +637,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin for _, rf := range cfg.RuleFiles { rfs, err := filepath.Glob(rf) if err != nil { - return nil, err + return nil, nil, err } // If an explicit file was given, error if it is not accessible. 
if !strings.Contains(rf, "*") { if len(rfs) == 0 { - return nil, fmt.Errorf("%q does not point to an existing file", rf) + return nil, nil, fmt.Errorf("%q does not point to an existing file", rf) } if err := checkFileExists(rfs[0]); err != nil { - return nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err) + return nil, nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err) } } ruleFiles = append(ruleFiles, rfs...) @@ -609,26 +659,26 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin var err error scfgs, err = cfg.GetScrapeConfigs() if err != nil { - return nil, fmt.Errorf("error loading scrape configs: %w", err) + return nil, nil, fmt.Errorf("error loading scrape configs: %w", err) } } for _, scfg := range scfgs { if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil { if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil { - return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err) + return nil, nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err) } } if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil { - return nil, err + return nil, nil, err } for _, c := range scfg.ServiceDiscoveryConfigs { switch c := c.(type) { case *kubernetes.SDConfig: if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil { - return nil, err + return nil, nil, err } case *file.SDConfig: if checkSyntaxOnly { @@ -637,17 +687,17 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin for _, file := range c.Files { files, err := filepath.Glob(file) if err != nil { - return nil, err + return nil, nil, err } if len(files) != 0 { for _, f := range files { var targetGroups []*targetgroup.Group targetGroups, err = 
checkSDFile(f) if err != nil { - return nil, fmt.Errorf("checking SD file %q: %w", file, err) + return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err) } if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil { - return nil, err + return nil, nil, err } } continue @@ -656,7 +706,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin } case discovery.StaticConfig: if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil { - return nil, err + return nil, nil, err } } } @@ -673,18 +723,18 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin for _, file := range c.Files { files, err := filepath.Glob(file) if err != nil { - return nil, err + return nil, nil, err } if len(files) != 0 { for _, f := range files { var targetGroups []*targetgroup.Group targetGroups, err = checkSDFile(f) if err != nil { - return nil, fmt.Errorf("checking SD file %q: %w", file, err) + return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err) } if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil { - return nil, err + return nil, nil, err } } continue @@ -693,12 +743,12 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin } case discovery.StaticConfig: if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil { - return nil, err + return nil, nil, err } } } } - return ruleFiles, nil + return ruleFiles, scfgs, nil } func checkTLSConfig(tlsConfig promconfig.TLSConfig, checkSyntaxOnly bool) error { @@ -760,7 +810,7 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) { } // CheckRules validates rule files. -func CheckRules(ls lintConfig, files ...string) int { +func CheckRules(ls rulesLintConfig, files ...string) int { failed := false hasErrors := false if len(files) == 0 { @@ -780,7 +830,7 @@ func CheckRules(ls lintConfig, files ...string) int { } // checkRulesFromStdin validates rule from stdin. 
-func checkRulesFromStdin(ls lintConfig) (bool, bool) { +func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) { failed := false hasErrors := false fmt.Println("Checking standard input") @@ -818,7 +868,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) { } // checkRules validates rule files. -func checkRules(files []string, ls lintConfig) (bool, bool) { +func checkRules(files []string, ls rulesLintConfig) (bool, bool) { failed := false hasErrors := false for _, f := range files { @@ -852,7 +902,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) { return failed, hasErrors } -func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) { +func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings rulesLintConfig) (int, []error) { numRules := 0 for _, rg := range rgs.Groups { numRules += len(rg.Rules) @@ -876,6 +926,16 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []e return numRules, nil } +func lintScrapeConfigs(scrapeConfigs []*config.ScrapeConfig, lintSettings configLintConfig) bool { + for _, scfg := range scrapeConfigs { + if lintSettings.lookbackDelta > 0 && scfg.ScrapeInterval >= lintSettings.lookbackDelta { + fmt.Fprintf(os.Stderr, " FAILED: too long scrape interval found, data point will be marked as stale - job: %s, interval: %s\n", scfg.JobName, scfg.ScrapeInterval) + return true + } + } + return false +} + type compareRuleType struct { metric string label labels.Labels diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 9a07269188..92e5ff9e67 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -234,7 +234,7 @@ func TestCheckTargetConfig(t *testing.T) { for _, test := range cases { t.Run(test.name, func(t *testing.T) { t.Parallel() - _, err := checkConfig(false, "testdata/"+test.file, false) + _, _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { require.EqualErrorf(t, err, test.err, "Expected error %q, 
got %q", test.err, err.Error()) return @@ -319,7 +319,7 @@ func TestCheckConfigSyntax(t *testing.T) { for _, test := range cases { t.Run(test.name, func(t *testing.T) { t.Parallel() - _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly) + _, _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly) expectedErrMsg := test.err if strings.Contains(runtime.GOOS, "windows") { expectedErrMsg = test.errWindows @@ -355,7 +355,7 @@ func TestAuthorizationConfig(t *testing.T) { for _, test := range cases { t.Run(test.name, func(t *testing.T) { t.Parallel() - _, err := checkConfig(false, "testdata/"+test.file, false) + _, _, err := checkConfig(false, "testdata/"+test.file, false) if test.err != "" { require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error()) return @@ -508,7 +508,7 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false)) + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false)) require.Equal(t, successExitCode, exitCode, "") }) @@ -530,7 +530,7 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false)) + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false)) require.Equal(t, failureExitCode, exitCode, "") }) @@ -552,7 +552,7 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true)) + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true)) require.Equal(t, lintErrExitCode, exitCode, "") }) } @@ -560,23 +560,66 @@ func TestCheckRules(t *testing.T) { func TestCheckRulesWithRuleFiles(t *testing.T) { t.Run("rules-good", func(t *testing.T) { t.Parallel() - exitCode := 
CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml") require.Equal(t, successExitCode, exitCode, "") }) t.Run("rules-bad", func(t *testing.T) { t.Parallel() - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml") require.Equal(t, failureExitCode, exitCode, "") }) t.Run("rules-lint-fatal", func(t *testing.T) { t.Parallel() - exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml") require.Equal(t, lintErrExitCode, exitCode, "") }) } +func TestCheckScrapeConfigs(t *testing.T) { + for _, tc := range []struct { + name string + lookbackDelta model.Duration + expectError bool + }{ + { + name: "scrape interval less than lookback delta", + lookbackDelta: model.Duration(11 * time.Minute), + expectError: false, + }, + { + name: "scrape interval greater than lookback delta", + lookbackDelta: model.Duration(5 * time.Minute), + expectError: true, + }, + { + name: "scrape interval same as lookback delta", + lookbackDelta: model.Duration(10 * time.Minute), + expectError: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + // Non-fatal linting. + code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + require.Equal(t, successExitCode, code, "Non-fatal linting should return success") + // Fatal linting. 
+ code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + if tc.expectError { + require.Equal(t, lintErrExitCode, code, "Fatal linting should return error") + } else { + require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems") + } + // Check syntax only, no linting. + code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only") + // Lint option "none" should disable linting. + code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`) + }) + } +} + func TestTSDBDumpCommand(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") diff --git a/cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml b/cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml new file mode 100644 index 0000000000..0c85d13f31 --- /dev/null +++ b/cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml @@ -0,0 +1,3 @@ +scrape_configs: + - job_name: too_long_scrape_interval_test + scrape_interval: 10m diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 5e2a8f6bb1..09800af748 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -59,9 +59,10 @@ Check the resources for validity. #### Flags -| Flag | Description | -| --- | --- | -| --extended | Print extended information related to the cardinality of the metrics. 
| +| Flag | Description | Default | +| --- | --- | --- | +| --query.lookback-delta | The server's maximum query lookback duration. | `5m` | +| --extended | Print extended information related to the cardinality of the metrics. | | @@ -102,7 +103,7 @@ Check if the config files are valid or not. | Flag | Description | Default | | --- | --- | --- | | --syntax-only | Only check the config file syntax, ignoring file and content validation referenced in the config | | -| --lint | Linting checks to apply to the rules specified in the config. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` | +| --lint | Linting checks to apply to the rules/scrape configs specified in the config. Available options are: all, duplicate-rules, none, too-long-scrape-interval. Use --lint=none to disable linting | `duplicate-rules` | | --lint-fatal | Make lint errors exit with exit code 3. | `false` | | --agent | Check config file for Prometheus in Agent mode. | | From 92218ecb9b5c6e3c54abc909215d1da4cc8c9b75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 20 Dec 2024 14:56:51 +0200 Subject: [PATCH 1260/1397] promtool: add --ignore-unknown-fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add --ignore-unknown-fields that ignores unknown fields in rule group files. There are lots of tools in the ecosystem that "like" to extend the rule group file structure but they are currently unreadable by promtool if there's anything extra. The purpose of this flag is so that we could use the "vanilla" promtool instead of rolling our own. 
Some examples of tools/code: https://github.com/grafana/mimir/blob/main/pkg/mimirtool/rules/rwrulefmt/rulefmt.go https://github.com/grafana/cortex-tools/blob/8898eb3cc5355b917634c9da476045df7b0426af/pkg/rules/rules.go#L18-L25 Signed-off-by: Giedrius Statkevičius --- cmd/promtool/main.go | 28 +++++++++------- cmd/promtool/main_test.go | 24 +++++++------- cmd/promtool/rules.go | 2 +- cmd/promtool/testdata/rules_extrafields.yml | 33 +++++++++++++++++++ .../testdata/rules_run_extrafields.yml | 21 ++++++++++++ cmd/promtool/unittest.go | 16 ++++----- cmd/promtool/unittest_test.go | 23 +++++++++---- docs/command-line/promtool.md | 3 ++ model/rulefmt/rulefmt.go | 10 +++--- model/rulefmt/rulefmt_test.go | 8 ++--- rules/manager.go | 12 +++---- rules/manager_test.go | 26 +++++++-------- 12 files changed, 140 insertions(+), 66 deletions(-) create mode 100644 cmd/promtool/testdata/rules_extrafields.yml create mode 100644 cmd/promtool/testdata/rules_run_extrafields.yml diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 54292bbfe5..81ba93d2de 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -127,6 +127,7 @@ func main() { checkConfigLintFatal := checkConfigCmd.Flag( "lint-fatal", "Make lint errors exit with exit code 3.").Default("false").Bool() + checkConfigIgnoreUnknownFields := checkConfigCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. 
Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool() checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.") webConfigFiles := checkWebConfigCmd.Arg( @@ -154,6 +155,7 @@ func main() { checkRulesLintFatal := checkRulesCmd.Flag( "lint-fatal", "Make lint errors exit with exit code 3.").Default("false").Bool() + checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool() checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage) checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool() @@ -227,6 +229,7 @@ func main() { ).Required().ExistingFiles() testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool() testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool() + testRulesIgnoreUnknownFields := testRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. 
Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool() defaultDBPath := "data/" tsdbCmd := app.Command("tsdb", "Run tsdb commands.") @@ -348,7 +351,7 @@ func main() { os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): - os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, model.Duration(*checkLookbackDelta)), *configFiles...)) + os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.Duration(*checkLookbackDelta)), *configFiles...)) case checkServerHealthCmd.FullCommand(): os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper))) @@ -360,7 +363,7 @@ func main() { os.Exit(CheckWebConfig(*webConfigFiles...)) case checkRulesCmd.FullCommand(): - os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal), *ruleFiles...)) + os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields), *ruleFiles...)) case checkMetricsCmd.FullCommand(): os.Exit(CheckMetrics(*checkMetricsExtended)) @@ -402,6 +405,7 @@ func main() { *testRulesRun, *testRulesDiff, *testRulesDebug, + *testRulesIgnoreUnknownFields, *testRulesFiles...), ) @@ -455,15 +459,17 @@ func checkExperimental(f bool) { var errLint = errors.New("lint error") type rulesLintConfig struct { - all bool - duplicateRules bool - fatal bool + all bool + duplicateRules bool + fatal bool + ignoreUnknownFields bool } -func newRulesLintConfig(stringVal string, fatal bool) rulesLintConfig { +func newRulesLintConfig(stringVal string, fatal, ignoreUnknownFields bool) rulesLintConfig { items := strings.Split(stringVal, ",") ls := rulesLintConfig{ - fatal: fatal, + fatal: fatal, + ignoreUnknownFields: ignoreUnknownFields, } for 
_, setting := range items { switch setting { @@ -489,7 +495,7 @@ type configLintConfig struct { lookbackDelta model.Duration } -func newConfigLintConfig(optionsStr string, fatal bool, lookbackDelta model.Duration) configLintConfig { +func newConfigLintConfig(optionsStr string, fatal, ignoreUnknownFields bool, lookbackDelta model.Duration) configLintConfig { c := configLintConfig{ rulesLintConfig: rulesLintConfig{ fatal: fatal, @@ -518,7 +524,7 @@ func newConfigLintConfig(optionsStr string, fatal bool, lookbackDelta model.Dura } if len(rulesOptions) > 0 { - c.rulesLintConfig = newRulesLintConfig(strings.Join(rulesOptions, ","), fatal) + c.rulesLintConfig = newRulesLintConfig(strings.Join(rulesOptions, ","), fatal, ignoreUnknownFields) } return c @@ -839,7 +845,7 @@ func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) { fmt.Fprintln(os.Stderr, " FAILED:", err) return true, true } - rgs, errs := rulefmt.Parse(data) + rgs, errs := rulefmt.Parse(data, ls.ignoreUnknownFields) if errs != nil { failed = true fmt.Fprintln(os.Stderr, " FAILED:") @@ -873,7 +879,7 @@ func checkRules(files []string, ls rulesLintConfig) (bool, bool) { hasErrors := false for _, f := range files { fmt.Println("Checking", f) - rgs, errs := rulefmt.ParseFile(f) + rgs, errs := rulefmt.ParseFile(f, ls.ignoreUnknownFields) if errs != nil { failed = true fmt.Fprintln(os.Stderr, " FAILED:") diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 92e5ff9e67..48bed9a2df 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -185,7 +185,7 @@ func TestCheckDuplicates(t *testing.T) { c := test t.Run(c.name, func(t *testing.T) { t.Parallel() - rgs, err := rulefmt.ParseFile(c.ruleFile) + rgs, err := rulefmt.ParseFile(c.ruleFile, false) require.Empty(t, err) dups := checkDuplicates(rgs.Groups) require.Equal(t, c.expectedDups, dups) @@ -194,7 +194,7 @@ func TestCheckDuplicates(t *testing.T) { } func BenchmarkCheckDuplicates(b *testing.B) { - rgs, err := 
rulefmt.ParseFile("./testdata/rules_large.yml") + rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false) require.Empty(b, err) b.ResetTimer() @@ -508,7 +508,7 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false)) + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false)) require.Equal(t, successExitCode, exitCode, "") }) @@ -530,7 +530,7 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false)) + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false)) require.Equal(t, failureExitCode, exitCode, "") }) @@ -552,7 +552,7 @@ func TestCheckRules(t *testing.T) { defer func(v *os.File) { os.Stdin = v }(os.Stdin) os.Stdin = r - exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true)) + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false)) require.Equal(t, lintErrExitCode, exitCode, "") }) } @@ -560,19 +560,19 @@ func TestCheckRules(t *testing.T) { func TestCheckRulesWithRuleFiles(t *testing.T) { t.Run("rules-good", func(t *testing.T) { t.Parallel() - exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml") require.Equal(t, successExitCode, exitCode, "") }) t.Run("rules-bad", func(t *testing.T) { t.Parallel() - exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml") require.Equal(t, failureExitCode, exitCode, "") }) t.Run("rules-lint-fatal", func(t *testing.T) { t.Parallel() - exitCode := 
CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml") + exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml") require.Equal(t, lintErrExitCode, exitCode, "") }) } @@ -601,20 +601,20 @@ func TestCheckScrapeConfigs(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { // Non-fatal linting. - code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") require.Equal(t, successExitCode, code, "Non-fatal linting should return success") // Fatal linting. - code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") if tc.expectError { require.Equal(t, lintErrExitCode, code, "Fatal linting should return error") } else { require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems") } // Check syntax only, no linting. 
- code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only") // Lint option "none" should disable linting. - code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") + code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml") require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`) }) } diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index adb214b812..b2eb18ca8e 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -69,7 +69,7 @@ func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient q // loadGroups parses groups from a list of recording rule files. func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) { - groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...) + groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, false, filenames...) if errs != nil { return errs } diff --git a/cmd/promtool/testdata/rules_extrafields.yml b/cmd/promtool/testdata/rules_extrafields.yml new file mode 100644 index 0000000000..85ef079bb8 --- /dev/null +++ b/cmd/promtool/testdata/rules_extrafields.yml @@ -0,0 +1,33 @@ +# This is the rules file. 
It has an extra "ownership" +# field in the second group. promtool should ignore this field +# and not return an error with --ignore-unknown-fields. + +groups: + - name: alerts + namespace: "foobar" + rules: + - alert: InstanceDown + expr: up == 0 + for: 5m + labels: + severity: page + annotations: + summary: "Instance {{ $labels.instance }} down" + description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes." + - alert: AlwaysFiring + expr: 1 + + - name: rules + ownership: + service: "test" + rules: + - record: job:test:count_over_time1m + expr: sum without(instance) (count_over_time(test[1m])) + + # A recording rule that doesn't depend on input series. + - record: fixed_data + expr: 1 + + # Subquery with default resolution test. + - record: suquery_interval_test + expr: count_over_time(up[5m:]) diff --git a/cmd/promtool/testdata/rules_run_extrafields.yml b/cmd/promtool/testdata/rules_run_extrafields.yml new file mode 100644 index 0000000000..86879fc396 --- /dev/null +++ b/cmd/promtool/testdata/rules_run_extrafields.yml @@ -0,0 +1,21 @@ +# Minimal test case to see that --ignore-unknown-fields +# is working as expected. It should not return an error +# when any extra fields are present in the rules file. +rule_files: + - rules_extrafields.yml + +evaluation_interval: 1m + + +tests: + - name: extra ownership field test + input_series: + - series: test + values: 1 + + promql_expr_test: + - expr: test + eval_time: 0 + exp_samples: + - value: 1 + labels: test diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 78dacdc569..7f494e27aa 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -46,11 +46,11 @@ import ( // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. 
-func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int { - return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...) +func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int { + return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, ignoreUnknownFields, files...) } -func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int { +func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int { failed := false junit := &junitxml.JUnitXML{} @@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, } for _, f := range files { - if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, ignoreUnknownFields, junit.Suite(f)); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, return successExitCode } -func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error { +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug, ignoreUnknownFields bool, ts *junitxml.TestSuite) []error { b, err := os.ReadFile(filename) if err != nil { ts.Abort(err) @@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } - ers := t.test(testname, evalInterval, groupOrderMap, 
queryOpts, diffFlag, debug, unitTestInp.RuleFiles...) + ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.RuleFiles...) if ers != nil { for _, e := range ers { tc.Fail(e.Error()) @@ -198,7 +198,7 @@ type testGroup struct { } // test performs the unit tests. -func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) { +func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields bool, ruleFiles ...string) (outErr []error) { if debug { testStart := time.Now() fmt.Printf("DEBUG: Starting test %s\n", testname) @@ -228,7 +228,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde Logger: promslog.NewNopLogger(), } m := rules.NewManager(opts) - groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...) + groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ignoreUnknownFields, ruleFiles...) 
if ers != nil { return ers } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index ec34ad3185..7466b222ca 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -143,7 +143,7 @@ func TestRulesUnitTest(t *testing.T) { } t.Run(tt.name, func(t *testing.T) { t.Parallel() - if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want { + if got := RulesUnitTest(tt.queryOpts, nil, false, false, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) @@ -151,7 +151,7 @@ func TestRulesUnitTest(t *testing.T) { t.Run("Junit xml output ", func(t *testing.T) { t.Parallel() var buf bytes.Buffer - if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 { + if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, false, reuseFiles...); got != 1 { t.Errorf("RulesUnitTestResults() = %v, want 1", got) } var test junitxml.JUnitXML @@ -194,10 +194,11 @@ func TestRulesUnitTestRun(t *testing.T) { files []string } tests := []struct { - name string - args args - queryOpts promqltest.LazyLoaderOpts - want int + name string + args args + queryOpts promqltest.LazyLoaderOpts + want int + ignoreUnknownFields bool }{ { name: "Test all without run arg", @@ -231,11 +232,19 @@ func TestRulesUnitTestRun(t *testing.T) { }, want: 1, }, + { + name: "Test all with extra fields", + args: args{ + files: []string{"./testdata/rules_run_extrafields.yml"}, + }, + ignoreUnknownFields: true, + want: 0, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() - got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...) + got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.ignoreUnknownFields, tt.args.files...) 
require.Equal(t, tt.want, got) }) } diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 09800af748..ab675e6345 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -105,6 +105,7 @@ Check if the config files are valid or not. | --syntax-only | Only check the config file syntax, ignoring file and content validation referenced in the config | | | --lint | Linting checks to apply to the rules/scrape configs specified in the config. Available options are: all, duplicate-rules, none, too-long-scrape-interval. Use --lint=none to disable linting | `duplicate-rules` | | --lint-fatal | Make lint errors exit with exit code 3. | `false` | +| --ignore-unknown-fields | Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` | | --agent | Check config file for Prometheus in Agent mode. | | @@ -178,6 +179,7 @@ Check if the rule files are valid or not. | --- | --- | --- | | --lint | Linting checks to apply. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` | | --lint-fatal | Make lint errors exit with exit code 3. | `false` | +| --ignore-unknown-fields | Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` | @@ -465,6 +467,7 @@ Unit tests for rules. | --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | | --debug | Enable unit test debugging. | `false` | | --diff | [Experimental] Print colored differential output between expected & received output. 
| `false` | +| --ignore-unknown-fields | Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` | diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index bb36a21208..76b12f0e5c 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -314,7 +314,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { } // Parse parses and validates a set of rules. -func Parse(content []byte) (*RuleGroups, []error) { +func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) { var ( groups RuleGroups node ruleGroups @@ -322,7 +322,9 @@ func Parse(content []byte) (*RuleGroups, []error) { ) decoder := yaml.NewDecoder(bytes.NewReader(content)) - decoder.KnownFields(true) + if !ignoreUnknownFields { + decoder.KnownFields(true) + } err := decoder.Decode(&groups) // Ignore io.EOF which happens with empty input. if err != nil && !errors.Is(err, io.EOF) { @@ -341,12 +343,12 @@ func Parse(content []byte) (*RuleGroups, []error) { } // ParseFile reads and parses rules from a file. 
-func ParseFile(file string) (*RuleGroups, []error) { +func ParseFile(file string, ignoreUnknownFields bool) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { return nil, []error{fmt.Errorf("%s: %w", file, err)} } - rgs, errs := Parse(b) + rgs, errs := Parse(b, ignoreUnknownFields) for i := range errs { errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index 73ea174594..286e760a41 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -24,7 +24,7 @@ import ( ) func TestParseFileSuccess(t *testing.T) { - _, errs := ParseFile("testdata/test.yaml") + _, errs := ParseFile("testdata/test.yaml", false) require.Empty(t, errs, "unexpected errors parsing file") } @@ -84,7 +84,7 @@ func TestParseFileFailure(t *testing.T) { } for _, c := range table { - _, errs := ParseFile(filepath.Join("testdata", c.filename)) + _, errs := ParseFile(filepath.Join("testdata", c.filename), false) require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) } @@ -179,7 +179,7 @@ groups: } for _, tst := range tests { - rgs, errs := Parse([]byte(tst.ruleString)) + rgs, errs := Parse([]byte(tst.ruleString), false) require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString) passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0) require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString) @@ -206,7 +206,7 @@ groups: annotations: summary: "Instance {{ $labels.instance }} up" ` - _, errs := Parse([]byte(group)) + _, errs := Parse([]byte(group), false) require.Len(t, errs, 2, "Expected two errors") var err00 *Error require.ErrorAs(t, errs[0], &err00) diff --git a/rules/manager.go b/rules/manager.go index a3ae716e2b..2b06dee8b5 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -207,7 +207,7 @@ func (m *Manager) Update(interval 
time.Duration, files []string, externalLabels default: } - groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...) + groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, false, files...) if errs != nil { for _, e := range errs { @@ -276,7 +276,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels // GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them. type GroupLoader interface { - Load(identifier string) (*rulefmt.RuleGroups, []error) + Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) Parse(query string) (parser.Expr, error) } @@ -284,22 +284,22 @@ type GroupLoader interface { // and parser.ParseExpr. type FileLoader struct{} -func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { - return rulefmt.ParseFile(identifier) +func (FileLoader) Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) { + return rulefmt.ParseFile(identifier, ignoreUnknownFields) } func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) } // LoadGroups reads groups from a list of files. 
func (m *Manager) LoadGroups( - interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string, + interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, ignoreUnknownFields bool, filenames ...string, ) (map[string]*Group, []error) { groups := make(map[string]*Group) shouldRestore := !m.restored for _, fn := range filenames { - rgs, errs := m.opts.GroupLoader.Load(fn) + rgs, errs := m.opts.GroupLoader.Load(fn, ignoreUnknownFields) if errs != nil { return nil, errs } diff --git a/rules/manager_test.go b/rules/manager_test.go index 45da7a44b0..d7c767a3cd 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -808,7 +808,7 @@ func TestUpdate(t *testing.T) { } // Groups will be recreated if updated. - rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml") + rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml", false) require.Empty(t, errs, "file parsing failures") tmpFile, err := os.CreateTemp("", "rules.test.*.yaml") @@ -1532,7 +1532,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci }) t.Run("load a mix of dependent and independent rules", func(t *testing.T) { - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -1567,7 +1567,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci }) t.Run("load only independent rules", func(t *testing.T) { - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_independent.yaml"}...) 
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -1975,7 +1975,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { t.Cleanup(cancel) ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, 0)) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -2021,7 +2021,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { opts.RuleConcurrencyController = nil ruleManager := NewManager(opts) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -2059,7 +2059,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { opts.RuleConcurrencyController = nil ruleManager := NewManager(opts) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_independent.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -2103,7 +2103,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { opts.RuleConcurrencyController = nil ruleManager := NewManager(opts) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_independent.yaml"}...) 
+ groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -2150,7 +2150,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { opts.RuleConcurrencyController = nil ruleManager := NewManager(opts) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_indeterminates.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_indeterminates.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) @@ -2189,7 +2189,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { opts.RuleConcurrencyController = nil ruleManager := NewManager(opts) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) var group *Group @@ -2235,7 +2235,7 @@ func TestAsyncRuleEvaluation(t *testing.T) { opts.RuleConcurrencyController = nil ruleManager := NewManager(opts) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_chain.yaml"}...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_chain.yaml"}...) require.Empty(t, errs) require.Len(t, groups, 1) var group *Group @@ -2279,7 +2279,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) { ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, maxConcurrency)) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, files...) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, files...) 
require.Empty(t, errs) require.Len(t, groups, groupCount) @@ -2521,7 +2521,7 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) { QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, }) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, tc.ruleFile) + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, tc.ruleFile) require.Empty(t, errs) require.Len(t, groups, 1) @@ -2550,7 +2550,7 @@ func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) { QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, }) - groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, "fixtures/rules_multiple.yaml") + groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, "fixtures/rules_multiple.yaml") require.Empty(b, errs) require.Len(b, groups, 1) From 482a7fd89091fd23316c2eccda5c2bdb66bed95b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Wed, 15 Jan 2025 12:42:52 +0100 Subject: [PATCH 1261/1397] fix(remotewrite2): do not send uninitialized garbage if there's no metadata MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Found during testing for https://github.com/grafana/mimir/issues/9072 Debug printout showed: KRAJO: seriesName=cortex_request_duration_seconds_bucket, metricFamily=cortex_request_duration_seconds_bucket, type=GAUGE, help=cortex_bucket_index_load_duration_seconds_sum, unit= which is nonsense. I can imagine more cases where this is the case and makes actual sense. Some targets might miss metadata and if there's a pipeline that loses it. 
Signed-off-by: György Krajcsovits --- storage/remote/queue_manager.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index f0ea33fb52..731a0f405f 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1919,12 +1919,17 @@ func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int for nPending, d := range batch { pendingData[nPending].Samples = pendingData[nPending].Samples[:0] - // todo: should we also safeguard against empty metadata here? if d.metadata != nil { pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type) pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help) pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit) nPendingMetadata++ + } else { + // Safeguard against sending garbage in case of not having metadata + // for whatever reason. + pendingData[nPending].Metadata.Type = writev2.Metadata_METRIC_TYPE_UNSPECIFIED + pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize("") + pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize("") } if sendExemplars { From 9385f31147897efb147ee05f88a9aca7eecd527c Mon Sep 17 00:00:00 2001 From: bwplotka Date: Wed, 15 Jan 2025 11:33:42 +0000 Subject: [PATCH 1262/1397] scrape: Fix metadata in WAL not working for histograms and summaries. The was a bug (due to confusion?) on the local metadata cache that is cached by metric family not the series metric name. The fix is to NOT use that local cache at all (it's still needed for current metadata API implementation, added TODO on how we can get rid of it). I went ahead and also rename Metric field in metadata structs to MetricFamily to make clear it's not always __name__. 
Signed-off-by: bwplotka --- scrape/helpers_test.go | 27 ++- scrape/scrape.go | 207 ++++++++++++------- scrape/scrape_test.go | 129 +++++++++++- scrape/target.go | 16 +- storage/remote/metadata_watcher_test.go | 28 +-- storage/remote/queue_manager.go | 2 +- storage/remote/queue_manager_test.go | 10 +- web/api/v1/api.go | 28 +-- web/api/v1/api_test.go | 252 ++++++++++++------------ 9 files changed, 442 insertions(+), 257 deletions(-) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 7bc9e3f7d4..2719a467bc 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -90,6 +90,27 @@ type histogramSample struct { fh *histogram.FloatHistogram } +type metadataEntry struct { + m metadata.Metadata + metric labels.Labels +} + +func metadataEntryEqual(a, b metadataEntry) bool { + if !labels.Equal(a.metric, b.metric) { + return false + } + if a.m.Type != b.m.Type { + return false + } + if a.m.Unit != b.m.Unit { + return false + } + if a.m.Help != b.m.Help { + return false + } + return true +} + type collectResultAppendable struct { *collectResultAppender } @@ -112,8 +133,8 @@ type collectResultAppender struct { rolledbackHistograms []histogramSample resultExemplars []exemplar.Exemplar pendingExemplars []exemplar.Exemplar - resultMetadata []metadata.Metadata - pendingMetadata []metadata.Metadata + resultMetadata []metadataEntry + pendingMetadata []metadataEntry } func (a *collectResultAppender) SetOptions(opts *storage.AppendOptions) {} @@ -173,7 +194,7 @@ func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRe func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { a.mtx.Lock() defer a.mtx.Unlock() - a.pendingMetadata = append(a.pendingMetadata, m) + a.pendingMetadata = append(a.pendingMetadata, metadataEntry{metric: l, m: m}) if ref == 0 { ref = storage.SeriesRef(rand.Uint64()) } diff --git a/scrape/scrape.go b/scrape/scrape.go index 
2da07d719e..1f41f50689 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -29,6 +29,7 @@ import ( "strings" "sync" "time" + "unsafe" "github.com/klauspost/compress/gzip" config_util "github.com/prometheus/common/config" @@ -931,6 +932,7 @@ type scrapeLoop struct { // scrapeCache tracks mappings of exposed metric strings to label sets and // storage references. Additionally, it tracks staleness of series between // scrapes. +// Cache is meant to be used per a single target. type scrapeCache struct { iter uint64 // Current scrape iteration. @@ -951,8 +953,10 @@ type scrapeCache struct { seriesCur map[uint64]labels.Labels seriesPrev map[uint64]labels.Labels - metaMtx sync.Mutex - metadata map[string]*metaEntry + // TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to + // avoid locking (using metadata API can block scraping). + metaMtx sync.Mutex // Mutex is needed due to api touching it when metadata is queried. + metadata map[string]*metaEntry // metadata by metric family name. 
metrics *scrapeMetrics } @@ -1078,73 +1082,79 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) { } } -func (c *scrapeCache) setType(metric []byte, t model.MetricType) { - c.metaMtx.Lock() +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} - e, ok := c.metadata[string(metric)] +func (c *scrapeCache) setType(mfName []byte, t model.MetricType) ([]byte, *metaEntry) { + c.metaMtx.Lock() + defer c.metaMtx.Unlock() + + e, ok := c.metadata[yoloString(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} - c.metadata[string(metric)] = e + c.metadata[string(mfName)] = e } if e.Type != t { e.Type = t e.lastIterChange = c.iter } e.lastIter = c.iter - - c.metaMtx.Unlock() + return mfName, e } -func (c *scrapeCache) setHelp(metric, help []byte) { +func (c *scrapeCache) setHelp(mfName, help []byte) ([]byte, *metaEntry) { c.metaMtx.Lock() + defer c.metaMtx.Unlock() - e, ok := c.metadata[string(metric)] + e, ok := c.metadata[yoloString(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} - c.metadata[string(metric)] = e + c.metadata[string(mfName)] = e } if e.Help != string(help) { e.Help = string(help) e.lastIterChange = c.iter } e.lastIter = c.iter - - c.metaMtx.Unlock() + return mfName, e } -func (c *scrapeCache) setUnit(metric, unit []byte) { +func (c *scrapeCache) setUnit(mfName, unit []byte) ([]byte, *metaEntry) { c.metaMtx.Lock() + defer c.metaMtx.Unlock() - e, ok := c.metadata[string(metric)] + e, ok := c.metadata[yoloString(mfName)] if !ok { e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} - c.metadata[string(metric)] = e + c.metadata[string(mfName)] = e } if e.Unit != string(unit) { e.Unit = string(unit) e.lastIterChange = c.iter } e.lastIter = c.iter - - c.metaMtx.Unlock() + return mfName, e } -func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) { +// GetMetadata returns metadata given the 
metric family name. +func (c *scrapeCache) GetMetadata(mfName string) (MetricMetadata, bool) { c.metaMtx.Lock() defer c.metaMtx.Unlock() - m, ok := c.metadata[metric] + m, ok := c.metadata[mfName] if !ok { return MetricMetadata{}, false } return MetricMetadata{ - Metric: metric, - Type: m.Type, - Help: m.Help, - Unit: m.Unit, + MetricFamily: mfName, + Type: m.Type, + Help: m.Help, + Unit: m.Unit, }, true } +// ListMetadata lists metadata. func (c *scrapeCache) ListMetadata() []MetricMetadata { c.metaMtx.Lock() defer c.metaMtx.Unlock() @@ -1153,16 +1163,16 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata { for m, e := range c.metadata { res = append(res, MetricMetadata{ - Metric: m, - Type: e.Type, - Help: e.Help, - Unit: e.Unit, + MetricFamily: m, + Type: e.Type, + Help: e.Help, + Unit: e.Unit, }) } return res } -// MetadataSize returns the size of the metadata cache. +// SizeMetadata returns the size of the metadata cache. func (c *scrapeCache) SizeMetadata() (s int) { c.metaMtx.Lock() defer c.metaMtx.Unlock() @@ -1173,7 +1183,7 @@ func (c *scrapeCache) SizeMetadata() (s int) { return s } -// MetadataLen returns the number of metadata entries in the cache. +// LengthMetadata returns the number of metadata entries in the cache. 
func (c *scrapeCache) LengthMetadata() int { c.metaMtx.Lock() defer c.metaMtx.Unlock() @@ -1607,39 +1617,17 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ) } var ( - appErrs = appendErrors{} - sampleLimitErr error - bucketLimitErr error - lset labels.Labels // escapes to heap so hoisted out of loop - e exemplar.Exemplar // escapes to heap so hoisted out of loop - meta metadata.Metadata - metadataChanged bool + appErrs = appendErrors{} + sampleLimitErr error + bucketLimitErr error + lset labels.Labels // escapes to heap so hoisted out of loop + e exemplar.Exemplar // escapes to heap so hoisted out of loop + lastMeta *metaEntry + lastMFName []byte ) exemplars := make([]exemplar.Exemplar, 0, 1) - // updateMetadata updates the current iteration's metadata object and the - // metadataChanged value if we have metadata in the scrape cache AND the - // labelset is for a new series or the metadata for this series has just - // changed. It returns a boolean based on whether the metadata was updated. - updateMetadata := func(lset labels.Labels, isNewSeries bool) bool { - if !sl.appendMetadataToWAL { - return false - } - - sl.cache.metaMtx.Lock() - defer sl.cache.metaMtx.Unlock() - metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)] - if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) { - metadataChanged = true - meta.Type = metaEntry.Type - meta.Unit = metaEntry.Unit - meta.Help = metaEntry.Help - return true - } - return false - } - // Take an appender with limits. app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema) @@ -1669,14 +1657,18 @@ loop: break } switch et { + // TODO(bwplotka): Consider changing parser to give metadata at once instead of type, help and unit in separation, ideally on `Series()/Histogram() + // otherwise we can expose metadata without series on metadata API. 
case textparse.EntryType: - sl.cache.setType(p.Type()) + // TODO(bwplotka): Build meta entry directly instead of locking and updating the map. This will + // allow to properly update metadata when e.g unit was added, then removed; + lastMFName, lastMeta = sl.cache.setType(p.Type()) continue case textparse.EntryHelp: - sl.cache.setHelp(p.Help()) + lastMFName, lastMeta = sl.cache.setHelp(p.Help()) continue case textparse.EntryUnit: - sl.cache.setUnit(p.Unit()) + lastMFName, lastMeta = sl.cache.setUnit(p.Unit()) continue case textparse.EntryComment: continue @@ -1699,10 +1691,6 @@ loop: t = *parsedTimestamp } - // Zero metadata out for current iteration until it's resolved. - meta = metadata.Metadata{} - metadataChanged = false - if sl.cache.getDropped(met) { continue } @@ -1716,9 +1704,6 @@ loop: ref = ce.ref lset = ce.lset hash = ce.hash - - // Update metadata only if it changed in the current iteration. - updateMetadata(lset, false) } else { p.Metric(&lset) hash = lset.Hash() @@ -1747,9 +1732,6 @@ loop: sl.metrics.targetScrapePoolExceededLabelLimits.Inc() break loop } - - // Append metadata for new series if they were present. - updateMetadata(lset, true) } if seriesAlreadyScraped && parsedTimestamp == nil { @@ -1857,10 +1839,18 @@ loop: sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } - if sl.appendMetadataToWAL && metadataChanged { - if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { - // No need to fail the scrape on errors appending metadata. - sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + if sl.appendMetadataToWAL && lastMeta != nil { + // Is it new series OR did metadata change for this family? + if !ok || lastMeta.lastIterChange == sl.cache.iter { + // In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName. 
+ // However, optional TYPE etc metadata and broken OM text can break this, detect those cases here. + // TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. CT and NHCB parsing). + if isSeriesPartOfFamily(lset.Get(labels.MetricName), lastMFName, lastMeta.Type) { + if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil { + // No need to fail the scrape on errors appending metadata. + sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr) + } + } } } } @@ -1896,6 +1886,71 @@ loop: return } +func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool { + mfNameStr := yoloString(mfName) + if !strings.HasPrefix(mName, mfNameStr) { // Fast path. + return false + } + + var ( + gotMFName string + ok bool + ) + switch typ { + case model.MetricTypeCounter: + // Prometheus allows _total, cut it from mf name to support this case. 
+ mfNameStr, _ = strings.CutSuffix(mfNameStr, "_total") + + gotMFName, ok = strings.CutSuffix(mName, "_total") + if !ok { + gotMFName = mName + } + case model.MetricTypeHistogram: + gotMFName, ok = strings.CutSuffix(mName, "_bucket") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_sum") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_count") + if !ok { + gotMFName = mName + } + } + } + case model.MetricTypeGaugeHistogram: + gotMFName, ok = strings.CutSuffix(mName, "_bucket") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_gsum") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_gcount") + if !ok { + gotMFName = mName + } + } + } + case model.MetricTypeSummary: + gotMFName, ok = strings.CutSuffix(mName, "_sum") + if !ok { + gotMFName, ok = strings.CutSuffix(mName, "_count") + if !ok { + gotMFName = mName + } + } + case model.MetricTypeInfo: + // Technically prometheus text does not support info type, but we might + // accidentally allow info type in prom parse, so support metric family names + // with the _info explicitly too. + mfNameStr, _ = strings.CutSuffix(mfNameStr, "_info") + + gotMFName, ok = strings.CutSuffix(mName, "_info") + if !ok { + gotMFName = mName + } + default: + gotMFName = mName + } + return mfNameStr == gotMFName +} + // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample or bucket limit errors. 
func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a67d52e5cc..c1fca54c6a 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -50,6 +50,7 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" @@ -96,7 +97,9 @@ func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) { // Test with default OutOfOrderTimeWindow (0) t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) { s := teststorage.New(t) - defer s.Close() + t.Cleanup(func() { + _ = s.Close() + }) runScrapeLoopTest(t, s, false) }) @@ -104,7 +107,9 @@ func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) { // Test with specific OutOfOrderTimeWindow (600000) t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) { s := teststorage.New(t, 600000) - defer s.Close() + t.Cleanup(func() { + _ = s.Close() + }) runScrapeLoopTest(t, s, true) }) @@ -126,13 +131,13 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde timestampInorder2 := now.Add(5 * time.Minute) slApp := sl.appender(context.Background()) - _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", timestampInorder1) + _, _, _, err := sl.append(slApp, []byte(`metric_total{a="1",b="1"} 1`), "text/plain", timestampInorder1) require.NoError(t, err) - _, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder) + _, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder) require.NoError(t, err) - _, _, _, err = sl.append(slApp, 
[]byte(`metric_a{a="1",b="1"} 3`), "text/plain", timestampInorder2) + _, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 3`), "text/plain", timestampInorder2) require.NoError(t, err) require.NoError(t, slApp.Commit()) @@ -145,7 +150,7 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde defer q.Close() // Use a matcher to filter the metric name. - series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a")) + series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_total")) var results []floatSample for series.Next() { @@ -165,12 +170,12 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde // Define the expected results want := []floatSample{ { - metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"), t: timestamp.FromTime(timestampInorder1), f: 1, }, { - metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"), + metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"), t: timestamp.FromTime(timestampInorder2), f: 3, }, @@ -183,6 +188,110 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde } } +// Regression test against https://github.com/prometheus/prometheus/issues/15831. 
+func TestScrapeAppendMetadataUpdate(t *testing.T) { + const ( + scrape1 = `# TYPE test_metric counter +# HELP test_metric some help text +# UNIT test_metric metric +test_metric_total 1 +# TYPE test_metric2 gauge +# HELP test_metric2 other help text +test_metric2{foo="bar"} 2 +# TYPE test_metric3 gauge +# HELP test_metric3 this represents tricky case of "broken" text that is not trivial to detect +test_metric3_metric4{foo="bar"} 2 +# EOF` + scrape2 = `# TYPE test_metric counter +# HELP test_metric different help text +test_metric_total 11 +# TYPE test_metric2 gauge +# HELP test_metric2 other help text +# UNIT test_metric2 metric2 +test_metric2{foo="bar"} 22 +# EOF` + ) + + // Create an appender for adding samples to the storage. + capp := &collectResultAppender{next: nopAppender{}} + sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + + now := time.Now() + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + testutil.RequireEqualWithOptions(t, []metadataEntry{ + {metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}}, + {metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}}, + }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) + capp.resultMetadata = nil + + // Next (the same) scrape should not add new metadata entries. 
+ slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + testutil.RequireEqualWithOptions(t, []metadataEntry(nil), capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) + + slApp = sl.appender(context.Background()) + _, _, _, err = sl.append(slApp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second)) + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + testutil.RequireEqualWithOptions(t, []metadataEntry{ + {metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation. + {metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}}, + }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) +} + +func TestIsSeriesPartOfFamily(t *testing.T) { + t.Run("counter", func(t *testing.T) { + require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests_total"), model.MetricTypeCounter)) // Prometheus text style. + require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests"), model.MetricTypeCounter)) // OM text style. + require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests_total"), model.MetricTypeUnknown)) + + require.False(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests"), model.MetricTypeUnknown)) // We don't know. 
+ require.False(t, isSeriesPartOfFamily("http_requests2_total", []byte("http_requests_total"), model.MetricTypeCounter)) + require.False(t, isSeriesPartOfFamily("http_requests_requests_total", []byte("http_requests"), model.MetricTypeCounter)) + }) + + t.Run("gauge", func(t *testing.T) { + require.True(t, isSeriesPartOfFamily("http_requests_count", []byte("http_requests_count"), model.MetricTypeGauge)) + require.True(t, isSeriesPartOfFamily("http_requests_count", []byte("http_requests_count"), model.MetricTypeUnknown)) + + require.False(t, isSeriesPartOfFamily("http_requests_count2", []byte("http_requests_count"), model.MetricTypeCounter)) + }) + + t.Run("histogram", func(t *testing.T) { + require.True(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeHistogram)) + require.True(t, isSeriesPartOfFamily("http_requests_seconds_count", []byte("http_requests_seconds"), model.MetricTypeHistogram)) + require.True(t, isSeriesPartOfFamily("http_requests_seconds_bucket", []byte("http_requests_seconds"), model.MetricTypeHistogram)) + require.True(t, isSeriesPartOfFamily("http_requests_seconds", []byte("http_requests_seconds"), model.MetricTypeHistogram)) + + require.False(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeUnknown)) // We don't know. 
+ require.False(t, isSeriesPartOfFamily("http_requests_seconds2_sum", []byte("http_requests_seconds"), model.MetricTypeHistogram)) + }) + + t.Run("summary", func(t *testing.T) { + require.True(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeSummary)) + require.True(t, isSeriesPartOfFamily("http_requests_seconds_count", []byte("http_requests_seconds"), model.MetricTypeSummary)) + require.True(t, isSeriesPartOfFamily("http_requests_seconds", []byte("http_requests_seconds"), model.MetricTypeSummary)) + + require.False(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeUnknown)) // We don't know. + require.False(t, isSeriesPartOfFamily("http_requests_seconds2_sum", []byte("http_requests_seconds"), model.MetricTypeSummary)) + }) + + t.Run("info", func(t *testing.T) { + require.True(t, isSeriesPartOfFamily("go_build_info", []byte("go_build_info"), model.MetricTypeInfo)) // Prometheus text style. + require.True(t, isSeriesPartOfFamily("go_build_info", []byte("go_build"), model.MetricTypeInfo)) // OM text style. + require.True(t, isSeriesPartOfFamily("go_build_info", []byte("go_build_info"), model.MetricTypeUnknown)) + + require.False(t, isSeriesPartOfFamily("go_build_info", []byte("go_build"), model.MetricTypeUnknown)) // We don't know. 
+ require.False(t, isSeriesPartOfFamily("go_build2_info", []byte("go_build_info"), model.MetricTypeInfo)) + require.False(t, isSeriesPartOfFamily("go_build_build_info", []byte("go_build_info"), model.MetricTypeInfo)) + }) +} + func TestDroppedTargetsList(t *testing.T) { var ( app = &nopAppendable{} @@ -824,7 +933,7 @@ func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper s false, false, false, - false, + true, nil, false, newTestScrapeMetrics(t), @@ -1131,7 +1240,7 @@ func TestScrapeLoopMetadata(t *testing.T) { total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter # HELP test_metric some help text # UNIT test_metric metric -test_metric 1 +test_metric_total 1 # TYPE test_metric_no_help gauge # HELP test_metric_no_type other help text # EOF`), "application/openmetrics-text", time.Now()) diff --git a/scrape/target.go b/scrape/target.go index 22cde01c05..4f576504f0 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -78,17 +78,17 @@ func (t *Target) String() string { // MetricMetadataStore represents a storage for metadata. type MetricMetadataStore interface { ListMetadata() []MetricMetadata - GetMetadata(metric string) (MetricMetadata, bool) + GetMetadata(mfName string) (MetricMetadata, bool) SizeMetadata() int LengthMetadata() int } -// MetricMetadata is a piece of metadata for a metric. +// MetricMetadata is a piece of metadata for a metric family. type MetricMetadata struct { - Metric string - Type model.MetricType - Help string - Unit string + MetricFamily string + Type model.MetricType + Help string + Unit string } func (t *Target) ListMetadata() []MetricMetadata { @@ -124,14 +124,14 @@ func (t *Target) LengthMetadata() int { } // GetMetadata returns type and help metadata for the given metric. 
-func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) { +func (t *Target) GetMetadata(mfName string) (MetricMetadata, bool) { t.mtx.RLock() defer t.mtx.RUnlock() if t.metadata == nil { return MetricMetadata{}, false } - return t.metadata.GetMetadata(metric) + return t.metadata.GetMetadata(mfName) } func (t *Target) SetMetadataStore(s MetricMetadataStore) { diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go index ce9b9d022e..d939ef8efb 100644 --- a/storage/remote/metadata_watcher_test.go +++ b/storage/remote/metadata_watcher_test.go @@ -40,9 +40,9 @@ func (s *TestMetaStore) ListMetadata() []scrape.MetricMetadata { return s.Metadata } -func (s *TestMetaStore) GetMetadata(metric string) (scrape.MetricMetadata, bool) { +func (s *TestMetaStore) GetMetadata(mfName string) (scrape.MetricMetadata, bool) { for _, m := range s.Metadata { - if metric == m.Metric { + if mfName == m.MetricFamily { return m, true } } @@ -106,26 +106,26 @@ func TestWatchScrapeManager_ReadyForCollection(t *testing.T) { metadata := &TestMetaStore{ Metadata: []scrape.MetricMetadata{ { - Metric: "prometheus_tsdb_head_chunks_created_total", - Type: model.MetricTypeCounter, - Help: "Total number", - Unit: "", + MetricFamily: "prometheus_tsdb_head_chunks_created", + Type: model.MetricTypeCounter, + Help: "Total number", + Unit: "", }, { - Metric: "prometheus_remote_storage_retried_samples_total", - Type: model.MetricTypeCounter, - Help: "Total number", - Unit: "", + MetricFamily: "prometheus_remote_storage_retried_samples", + Type: model.MetricTypeCounter, + Help: "Total number", + Unit: "", }, }, } metadataDup := &TestMetaStore{ Metadata: []scrape.MetricMetadata{ { - Metric: "prometheus_tsdb_head_chunks_created_total", - Type: model.MetricTypeCounter, - Help: "Total number", - Unit: "", + MetricFamily: "prometheus_tsdb_head_chunks_created", + Type: model.MetricTypeCounter, + Help: "Total number", + Unit: "", }, }, } diff --git 
a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index f0ea33fb52..50f10eebe3 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -550,7 +550,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr mm := make([]prompb.MetricMetadata, 0, len(metadata)) for _, entry := range metadata { mm = append(mm, prompb.MetricMetadata{ - MetricFamilyName: entry.Metric, + MetricFamilyName: entry.MetricFamily, Help: entry.Help, Type: prompb.FromMetadataType(entry.Type), Unit: entry.Unit, diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 202c71c348..38eda81d97 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -342,10 +342,10 @@ func TestMetadataDelivery(t *testing.T) { numMetadata := 1532 for i := 0; i < numMetadata; i++ { metadata = append(metadata, scrape.MetricMetadata{ - Metric: "prometheus_remote_storage_sent_metadata_bytes_total_" + strconv.Itoa(i), - Type: model.MetricTypeCounter, - Help: "a nice help text", - Unit: "", + MetricFamily: "prometheus_remote_storage_sent_metadata_bytes_" + strconv.Itoa(i), + Type: model.MetricTypeCounter, + Help: "a nice help text", + Unit: "", }) } @@ -357,7 +357,7 @@ func TestMetadataDelivery(t *testing.T) { // fit into MaxSamplesPerSend. require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived) // Make sure the last samples were sent. 
- require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].Metric][0].MetricFamilyName, metadata[len(metadata)-1].Metric) + require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].MetricFamily][0].MetricFamilyName, metadata[len(metadata)-1].MetricFamily) } func TestWALMetadataDelivery(t *testing.T) { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 9821b26ec2..7687264426 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1228,11 +1228,11 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { if metric == "" { for _, md := range t.ListMetadata() { res = append(res, metricMetadata{ - Target: targetLabels, - Metric: md.Metric, - Type: md.Type, - Help: md.Help, - Unit: md.Unit, + Target: targetLabels, + MetricFamily: md.MetricFamily, + Type: md.Type, + Help: md.Help, + Unit: md.Unit, }) } continue @@ -1253,11 +1253,11 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } type metricMetadata struct { - Target labels.Labels `json:"target"` - Metric string `json:"metric,omitempty"` - Type model.MetricType `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` + Target labels.Labels `json:"target"` + MetricFamily string `json:"metric,omitempty"` + Type model.MetricType `json:"type"` + Help string `json:"help"` + Unit string `json:"unit"` } // AlertmanagerDiscovery has all the active Alertmanagers. 
@@ -1357,7 +1357,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if metric == "" { for _, mm := range t.ListMetadata() { m := metadata.Metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} - ms, ok := metrics[mm.Metric] + ms, ok := metrics[mm.MetricFamily] if limitPerMetric > 0 && len(ms) >= limitPerMetric { continue @@ -1365,7 +1365,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if !ok { ms = map[metadata.Metadata]struct{}{} - metrics[mm.Metric] = ms + metrics[mm.MetricFamily] = ms } ms[m] = struct{}{} } @@ -1374,7 +1374,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if md, ok := t.GetMetadata(metric); ok { m := metadata.Metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} - ms, ok := metrics[md.Metric] + ms, ok := metrics[md.MetricFamily] if limitPerMetric > 0 && len(ms) >= limitPerMetric { continue @@ -1382,7 +1382,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { if !ok { ms = map[metadata.Metadata]struct{}{} - metrics[md.Metric] = ms + metrics[md.MetricFamily] = ms } ms[m] = struct{}{} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 37227d849d..f9bdbe3947 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -84,7 +84,7 @@ func (s *testMetaStore) ListMetadata() []scrape.MetricMetadata { func (s *testMetaStore) GetMetadata(metric string) (scrape.MetricMetadata, bool) { for _, m := range s.Metadata { - if metric == m.Metric { + if metric == m.MetricFamily { return m, true } } @@ -1891,10 +1891,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created.", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created.", + Unit: "", }, }, }, @@ -1921,10 +1921,10 @@ func testEndpoints(t *testing.T, api *API, tr 
*testTargetRetriever, es storage.E identifier: "blackbox", metadata: []scrape.MetricMetadata{ { - Metric: "prometheus_tsdb_storage_blocks_bytes", - Type: model.MetricTypeGauge, - Help: "The number of bytes that are currently used for local storage by all blocks.", - Unit: "", + MetricFamily: "prometheus_tsdb_storage_blocks_bytes", + Type: model.MetricTypeGauge, + Help: "The number of bytes that are currently used for local storage by all blocks.", + Unit: "", }, }, }, @@ -1934,10 +1934,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Target: labels.FromMap(map[string]string{ "job": "blackbox", }), - Metric: "prometheus_tsdb_storage_blocks_bytes", - Help: "The number of bytes that are currently used for local storage by all blocks.", - Type: model.MetricTypeGauge, - Unit: "", + MetricFamily: "prometheus_tsdb_storage_blocks_bytes", + Help: "The number of bytes that are currently used for local storage by all blocks.", + Type: model.MetricTypeGauge, + Unit: "", }, }, }, @@ -1949,10 +1949,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created.", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created.", + Unit: "", }, }, }, @@ -1960,10 +1960,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "blackbox", metadata: []scrape.MetricMetadata{ { - Metric: "prometheus_tsdb_storage_blocks_bytes", - Type: model.MetricTypeGauge, - Help: "The number of bytes that are currently used for local storage by all blocks.", - Unit: "", + MetricFamily: "prometheus_tsdb_storage_blocks_bytes", + Type: model.MetricTypeGauge, + Help: "The number of bytes that are currently used for local storage by all blocks.", + Unit: "", }, }, }, @@ -1973,25 +1973,25 @@ func 
testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E Target: labels.FromMap(map[string]string{ "job": "test", }), - Metric: "go_threads", - Help: "Number of OS threads created.", - Type: model.MetricTypeGauge, - Unit: "", + MetricFamily: "go_threads", + Help: "Number of OS threads created.", + Type: model.MetricTypeGauge, + Unit: "", }, { Target: labels.FromMap(map[string]string{ "job": "blackbox", }), - Metric: "prometheus_tsdb_storage_blocks_bytes", - Help: "The number of bytes that are currently used for local storage by all blocks.", - Type: model.MetricTypeGauge, - Unit: "", + MetricFamily: "prometheus_tsdb_storage_blocks_bytes", + Help: "The number of bytes that are currently used for local storage by all blocks.", + Type: model.MetricTypeGauge, + Unit: "", }, }, sorter: func(m interface{}) { sort.Slice(m.([]metricMetadata), func(i, j int) bool { s := m.([]metricMetadata) - return s[i].Metric < s[j].Metric + return s[i].MetricFamily < s[j].MetricFamily }) }, }, @@ -2026,16 +2026,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "prometheus_engine_query_duration_seconds", - Type: model.MetricTypeSummary, - Help: "Query timings", - Unit: "", + MetricFamily: "prometheus_engine_query_duration_seconds", + Type: model.MetricTypeSummary, + Help: "Query timings", + Unit: "", }, { - Metric: "go_info", - Type: model.MetricTypeGauge, - Help: "Information about the Go environment.", - Unit: "", + MetricFamily: "go_info", + Type: model.MetricTypeGauge, + Help: "Information about the Go environment.", + Unit: "", }, }, }, @@ -2056,10 +2056,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + 
Help: "Number of OS threads created", + Unit: "", }, }, }, @@ -2067,10 +2067,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "blackbox", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, }, }, @@ -2089,10 +2089,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, }, }, @@ -2100,10 +2100,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "blackbox", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads that were created.", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads that were created.", + Unit: "", }, }, }, @@ -2136,16 +2136,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, { - Metric: "prometheus_engine_query_duration_seconds", - Type: model.MetricTypeSummary, - Help: "Query Timings.", - Unit: "", + MetricFamily: "prometheus_engine_query_duration_seconds", + Type: model.MetricTypeSummary, + Help: "Query Timings.", + Unit: "", }, }, }, @@ -2153,10 +2153,10 @@ func testEndpoints(t *testing.T, api *API, tr 
*testTargetRetriever, es storage.E identifier: "blackbox", metadata: []scrape.MetricMetadata{ { - Metric: "go_gc_duration_seconds", - Type: model.MetricTypeSummary, - Help: "A summary of the GC invocation durations.", - Unit: "", + MetricFamily: "go_gc_duration_seconds", + Type: model.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", }, }, }, @@ -2172,22 +2172,22 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Repeated metadata", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", }, { - Metric: "go_gc_duration_seconds", - Type: model.MetricTypeSummary, - Help: "A summary of the GC invocation durations.", - Unit: "", + MetricFamily: "go_gc_duration_seconds", + Type: model.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", }, }, }, @@ -2211,22 +2211,22 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Repeated metadata", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", }, { - Metric: "go_gc_duration_seconds", - Type: model.MetricTypeSummary, - Help: "A summary of the GC invocation durations.", - Unit: "", + MetricFamily: 
"go_gc_duration_seconds", + Type: model.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", }, }, }, @@ -2244,22 +2244,22 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Repeated metadata", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Repeated metadata", + Unit: "", }, { - Metric: "go_gc_duration_seconds", - Type: model.MetricTypeSummary, - Help: "A summary of the GC invocation durations.", - Unit: "", + MetricFamily: "go_gc_duration_seconds", + Type: model.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", }, }, }, @@ -2267,16 +2267,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "secondTarget", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created, but from a different target", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created, but from a different target", + Unit: "", }, { - Metric: "go_gc_duration_seconds", - Type: model.MetricTypeSummary, - Help: "A summary of the GC invocation durations, but from a different target.", - Unit: "", + MetricFamily: "go_gc_duration_seconds", + Type: model.MetricTypeSummary, + Help: "A summary of the GC invocation durations, but from a different target.", + Unit: "", }, }, }, @@ -2293,10 +2293,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: 
model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, }, }, @@ -2304,16 +2304,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "blackbox", metadata: []scrape.MetricMetadata{ { - Metric: "go_gc_duration_seconds", - Type: model.MetricTypeSummary, - Help: "A summary of the GC invocation durations.", - Unit: "", + MetricFamily: "go_gc_duration_seconds", + Type: model.MetricTypeSummary, + Help: "A summary of the GC invocation durations.", + Unit: "", }, { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads that were created.", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads that were created.", + Unit: "", }, }, }, @@ -2342,10 +2342,10 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E identifier: "test", metadata: []scrape.MetricMetadata{ { - Metric: "go_threads", - Type: model.MetricTypeGauge, - Help: "Number of OS threads created", - Unit: "", + MetricFamily: "go_threads", + Type: model.MetricTypeGauge, + Help: "Number of OS threads created", + Unit: "", }, }, }, From 5ed0edd3abc6d3799794f77c803289e755b81427 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 16 Jan 2025 08:12:33 +0100 Subject: [PATCH 1263/1397] Update storage/remote/queue_manager.go Co-authored-by: Bartlomiej Plotka Signed-off-by: George Krajcsovits --- storage/remote/queue_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 731a0f405f..7bf9600aff 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1928,8 +1928,8 @@ func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, // Safeguard against sending garbage in case of not having 
metadata // for whatever reason. pendingData[nPending].Metadata.Type = writev2.Metadata_METRIC_TYPE_UNSPECIFIED - pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize("") - pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize("") + pendingData[nPending].Metadata.HelpRef = 0 + pendingData[nPending].Metadata.UnitRef = 0 } if sendExemplars { From af928a1ba42fd0e99b4c920142399b4da47c3d1f Mon Sep 17 00:00:00 2001 From: bwplotka Date: Thu, 16 Jan 2025 13:15:56 +0000 Subject: [PATCH 1264/1397] Addressed Krajo's comment. Signed-off-by: bwplotka --- scrape/scrape.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 1f41f50689..f9178c507b 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1694,13 +1694,13 @@ loop: if sl.cache.getDropped(met) { continue } - ce, ok, seriesAlreadyScraped := sl.cache.get(met) + ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met) var ( ref storage.SeriesRef hash uint64 ) - if ok { + if seriesCached { ref = ce.ref lset = ce.lset hash = ce.hash @@ -1781,7 +1781,7 @@ loop: break loop } - if !ok { + if !seriesCached { if parsedTimestamp == nil || sl.trackTimestampsStaleness { // Bypass staleness logic if there is an explicit timestamp. sl.cache.trackStaleness(hash, lset) @@ -1841,7 +1841,7 @@ loop: if sl.appendMetadataToWAL && lastMeta != nil { // Is it new series OR did metadata change for this family? - if !ok || lastMeta.lastIterChange == sl.cache.iter { + if !seriesCached || lastMeta.lastIterChange == sl.cache.iter { // In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName. // However, optional TYPE etc metadata and broken OM text can break this, detect those cases here. // TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. CT and NHCB parsing). 
From 3119567d5bd32591bbb7db927cf21a2f79a44a16 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Thu, 16 Jan 2025 19:47:32 +0000 Subject: [PATCH 1265/1397] scrape: Add metadata for automatic metrics. Signed-off-by: bwplotka --- scrape/scrape.go | 125 ++++++++++++++++++++++++++++++++---------- scrape/scrape_test.go | 24 ++++++++ 2 files changed, 121 insertions(+), 28 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index f9178c507b..cf330fb75d 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1989,17 +1989,80 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke } } +// reportSample represents automatically generated timeseries documented in +// https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series +type reportSample struct { + metadata.Metadata + name []byte +} + // The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache. var ( - scrapeHealthMetricName = []byte("up" + "\xff") - scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff") - scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff") - samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff") - scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff") - scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff") - scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff") - scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff") + scrapeHealthMetric = reportSample{ + name: []byte("up" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Health of the scrape target. 
1 means the target is healthy, 0 if the scrape failed.", + Unit: "targets", + }, + } + scrapeDurationMetric = reportSample{ + name: []byte("scrape_duration_seconds" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Duration of the last scrape in seconds.", + Unit: "seconds", + }, + } + scrapeSamplesMetric = reportSample{ + name: []byte("scrape_samples_scraped" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Number of samples last scraped.", + Unit: "samples", + }, + } + samplesPostRelabelMetric = reportSample{ + name: []byte("scrape_samples_post_metric_relabeling" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Number of samples remaining after metric relabeling was applied.", + Unit: "samples", + }, + } + scrapeSeriesAddedMetric = reportSample{ + name: []byte("scrape_series_added" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "Number of series in the last scrape.", + Unit: "series", + }, + } + scrapeTimeoutMetric = reportSample{ + name: []byte("scrape_timeout_seconds" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "The configured scrape timeout for a target.", + Unit: "seconds", + }, + } + scrapeSampleLimitMetric = reportSample{ + name: []byte("scrape_sample_limit" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: "The configured sample limit for a target. Returns zero if there is no limit configured.", + Unit: "samples", + }, + } + scrapeBodySizeBytesMetric = reportSample{ + name: []byte("scrape_body_size_bytes" + "\xff"), + Metadata: metadata.Metadata{ + Type: model.MetricTypeGauge, + Help: " The uncompressed size of the last scrape response, if successful. 
Scrapes failing because body_size_limit is exceeded report -1, other scrape failures report 0.", + Unit: "bytes", + }, + } ) func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { @@ -2013,29 +2076,29 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim } b := labels.NewBuilderWithSymbolTable(sl.symbolTable) - if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health, b); err != nil { + if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil { return } - if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds(), b); err != nil { + if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil { return } - if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped), b); err != nil { + if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil { return } - if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added), b); err != nil { + if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil { return } - if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded), b); err != nil { + if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil { return } if sl.reportExtraMetrics { - if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds(), b); err != nil { + if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil { return } - if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit), b); err != nil { + if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil { return } - if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, 
ts, float64(bytes), b); err != nil { + if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil { return } } @@ -2048,37 +2111,37 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er stale := math.Float64frombits(value.StaleNaN) b := labels.NewBuilder(labels.EmptyLabels()) - if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil { return } if sl.reportExtraMetrics { - if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil { return } - if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale, b); err != nil { + if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil { return } } return } -func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error { - ce, ok, _ := 
sl.cache.get(s) +func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error { + ce, ok, _ := sl.cache.get(s.name) var ref storage.SeriesRef var lset labels.Labels if ok { @@ -2089,7 +2152,7 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v // with scraped metrics in the cache. // We have to drop it when building the actual metric. b.Reset(labels.EmptyLabels()) - b.Set(labels.MetricName, string(s[:len(s)-1])) + b.Set(labels.MetricName, string(s.name[:len(s.name)-1])) lset = sl.reportSampleMutator(b.Labels()) } @@ -2097,7 +2160,13 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v switch { case err == nil: if !ok { - sl.cache.addRef(s, ref, lset, lset.Hash()) + sl.cache.addRef(s.name, ref, lset, lset.Hash()) + // We only need to add metadata once a scrape target appears. + if sl.appendMetadataToWAL { + if _, merr := app.UpdateMetadata(ref, lset, s.Metadata); merr != nil { + sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr) + } + } } return nil case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index c1fca54c6a..2bb9c7247d 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -244,6 +244,30 @@ test_metric2{foo="bar"} 22 }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) } +type nopScraper struct { + scraper +} + +func (n nopScraper) Report(start time.Time, dur time.Duration, err error) {} + +func TestScrapeReportMetadataUpdate(t *testing.T) { + // Create an appender for adding samples to the storage. 
+ capp := &collectResultAppender{next: nopAppender{}} + sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(ctx context.Context) storage.Appender { return capp }, 0) + now := time.Now() + slApp := sl.appender(context.Background()) + + require.NoError(t, sl.report(slApp, now, 2*time.Second, 1, 1, 1, 512, nil)) + require.NoError(t, slApp.Commit()) + testutil.RequireEqualWithOptions(t, []metadataEntry{ + {metric: labels.FromStrings("__name__", "up"), m: scrapeHealthMetric.Metadata}, + {metric: labels.FromStrings("__name__", "scrape_duration_seconds"), m: scrapeDurationMetric.Metadata}, + {metric: labels.FromStrings("__name__", "scrape_samples_scraped"), m: scrapeSamplesMetric.Metadata}, + {metric: labels.FromStrings("__name__", "scrape_samples_post_metric_relabeling"), m: samplesPostRelabelMetric.Metadata}, + {metric: labels.FromStrings("__name__", "scrape_series_added"), m: scrapeSeriesAddedMetric.Metadata}, + }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)}) +} + func TestIsSeriesPartOfFamily(t *testing.T) { t.Run("counter", func(t *testing.T) { require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests_total"), model.MetricTypeCounter)) // Prometheus text style. From 6f1ad64e272eb434018647f3f3e4e5cf2f716481 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Fri, 17 Jan 2025 09:09:28 +0000 Subject: [PATCH 1266/1397] Update scrape/scrape.go Co-authored-by: Julius Volz Signed-off-by: Bartlomiej Plotka --- scrape/scrape.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index cf330fb75d..85eb07a1cc 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -2059,7 +2059,7 @@ var ( name: []byte("scrape_body_size_bytes" + "\xff"), Metadata: metadata.Metadata{ Type: model.MetricTypeGauge, - Help: " The uncompressed size of the last scrape response, if successful. 
Scrapes failing because body_size_limit is exceeded report -1, other scrape failures report 0.", + Help: "The uncompressed size of the last scrape response, if successful. Scrapes failing because body_size_limit is exceeded report -1, other scrape failures report 0.", Unit: "bytes", }, } From f46b984dd12fb69b239af0a8385aaf61efd5600b Mon Sep 17 00:00:00 2001 From: Fiona Liao Date: Fri, 17 Jan 2025 16:17:11 +0000 Subject: [PATCH 1267/1397] Add additional incompatible nhcb schemas tests for functions and comparison operators (#15813) promql: Add additional incompatible nhcb schemas tests for functions and comparison operators * Add agg_over_time tests for nhcb with incompatible schemas * Add more function and comparison operator tests --------- Signed-off-by: Fiona Liao --- .../testdata/native_histograms.test | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 414619d5cd..f03b39a9f6 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1163,6 +1163,51 @@ eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) met clear +# Test incompatible schemas with comparison binary operators +load 6m + metric1 {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 6m step 6m metric1 == metric2 +metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 6m step 6m metric1 != metric2 +metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ + +eval_info range from 0 to 6m step 6m metric2 > metric2 + +clear + +load 6m + nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} 
{{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval_warn instant at 12m sum_over_time(nhcb_metric[13m]) + +eval_warn instant at 12m avg_over_time(nhcb_metric[13m]) + +eval instant at 12m last_over_time(nhcb_metric[13m]) +nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval instant at 12m count_over_time(nhcb_metric[13m]) +{} 3 + +eval instant at 12m present_over_time(nhcb_metric[13m]) +{} 1 + +eval instant at 12m changes(nhcb_metric[13m]) +{} 1 + +eval_warn instant at 12m delta(nhcb_metric[13m]) + +eval_warn instant at 12m increase(nhcb_metric[13m]) + +eval_warn instant at 12m rate(nhcb_metric[13m]) + +eval instant at 12m resets(nhcb_metric[13m]) +{} 1 + +clear + load 1m metric{group="just-floats", series="1"} 2 metric{group="just-floats", series="2"} 3 From 2a17b99ee2b9ead6729e3c3aed179792399e4ea0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:25:23 +0000 Subject: [PATCH 1268/1397] chore(deps): bump nanoid from 3.3.7 to 3.3.8 in /web/ui/react-app (#15842) Bumps [nanoid](https://github.com/ai/nanoid) from 3.3.7 to 3.3.8. - [Release notes](https://github.com/ai/nanoid/releases) - [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md) - [Commits](https://github.com/ai/nanoid/compare/3.3.7...3.3.8) --- updated-dependencies: - dependency-name: nanoid dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index ff74defe6e..77283e7817 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -16413,9 +16413,9 @@ } }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", "funding": [ { "type": "github", From 6f1669e017583a899675d11fefaca443c88f28cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:29:16 +0000 Subject: [PATCH 1269/1397] chore(deps): bump golang.org/x/crypto (#15840) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.30.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.30.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index d2b15dbe52..c1fb2e3564 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -55,7 +55,7 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.30.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.32.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sys v0.28.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 860b9a944b..c360e28e76 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp 
v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= From a75760c3d2d8384acc1edb017dbbb7f9a65b6366 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:34:27 +0000 Subject: [PATCH 1270/1397] chore(deps-dev): bump webpack from 5.91.0 to 5.97.1 in /web/ui/react-app (#15843) Bumps [webpack](https://github.com/webpack/webpack) from 5.91.0 to 5.97.1. - [Release notes](https://github.com/webpack/webpack/releases) - [Commits](https://github.com/webpack/webpack/compare/v5.91.0...v5.97.1) --- updated-dependencies: - dependency-name: webpack dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/react-app/package-lock.json | 258 ++++++++++++++--------------- 1 file changed, 124 insertions(+), 134 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 77283e7817..c17bf60abf 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -5224,9 +5224,9 @@ } }, "node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", "dev": true }, "node_modules/@types/express": { @@ -5866,148 +5866,148 @@ "dev": true }, "node_modules/@webassemblyjs/ast": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz", - "integrity": 
"sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", "dev": true, "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" } }, "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", - "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", "dev": true }, "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", - "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", "dev": true }, "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz", - "integrity": "sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==", + "version": "1.14.1", + 
"resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", "dev": true }, "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", - "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", "dev": true, "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", - "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", "dev": true }, "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz", - "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==", + "version": "1.14.1", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-buffer": "1.12.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/wasm-gen": "1.12.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" } }, "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", - "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", "dev": true, "dependencies": { "@xtuc/ieee754": "^1.2.0" } }, "node_modules/@webassemblyjs/leb128": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", - "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", "dev": true, "dependencies": { "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/utf8": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", - "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==", + "version": "1.13.2", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", "dev": true }, "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz", - "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-buffer": "1.12.1", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/helper-wasm-section": "1.12.1", - "@webassemblyjs/wasm-gen": "1.12.1", - "@webassemblyjs/wasm-opt": "1.12.1", - "@webassemblyjs/wasm-parser": "1.12.1", - "@webassemblyjs/wast-printer": "1.12.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz", - "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.12.1", - 
"@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz", - "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-buffer": "1.12.1", - "@webassemblyjs/wasm-gen": "1.12.1", - "@webassemblyjs/wasm-parser": "1.12.1" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz", - "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.12.1", - "@webassemblyjs/helper-api-error": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + 
"@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wast-printer": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz", - "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.12.1", + "@webassemblyjs/ast": "1.14.1", "@xtuc/long": "4.2.2" } }, @@ -6084,9 +6084,9 @@ } }, "node_modules/acorn": { - "version": "8.11.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", - "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -6117,15 +6117,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-assertions": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", - "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", - "dev": true, - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -7115,9 +7106,9 @@ "dev": true }, "node_modules/browserslist": { - "version": "4.23.0", 
- "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "version": "4.24.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", + "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", "dev": true, "funding": [ { @@ -7134,10 +7125,10 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "caniuse-lite": "^1.0.30001688", + "electron-to-chromium": "^1.5.73", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.1" }, "bin": { "browserslist": "cli.js" @@ -7265,9 +7256,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001610", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001610.tgz", - "integrity": "sha512-QFutAY4NgaelojVMjY63o6XlZyORPaLfyMnsl3HgnWdJUcX6K0oaJymHjH8PT5Gk7sTm8rvC/c5COUQKXqmOMA==", + "version": "1.0.30001692", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001692.tgz", + "integrity": "sha512-A95VKan0kdtrsnMubMKxEKUKImOPSuCpYgxSQBo036P5YYgVIcOYJEgt/txJWqObiRQeISNCfef9nvlQ0vbV7A==", "dev": true, "funding": [ { @@ -8906,9 +8897,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.736", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.736.tgz", - "integrity": "sha512-Rer6wc3ynLelKNM4lOCg7/zPQj8tPOCB2hzD32PX9wd3hgRRi9MxEbmkFCokzcEhRVMiOVLjnL9ig9cefJ+6+Q==", + "version": "1.5.83", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.83.tgz", + "integrity": "sha512-LcUDPqSt+V0QmI47XLzZrz5OqILSMGsPFkDYus22rIbgorSvBYEFqq854ltTmUdHkY92FSdAAvsh4jWEULMdfQ==", "dev": true }, "node_modules/emittery": { @@ -8949,9 +8940,9 @@ } }, 
"node_modules/enhanced-resolve": { - "version": "5.16.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz", - "integrity": "sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA==", + "version": "5.18.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.0.tgz", + "integrity": "sha512-0/r0MySGYG8YqlayBZ6MuCfECmHFdJ5qyPh8s8wa5Hnm6SaFLSK1VYCbj+NKp090Nm1caZhD+QTnmxO7esYGyQ==", "dev": true, "dependencies": { "graceful-fs": "^4.2.4", @@ -9228,9 +9219,9 @@ } }, "node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "dev": true, "engines": { "node": ">=6" @@ -16555,9 +16546,9 @@ "dev": true }, "node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", "dev": true }, "node_modules/normalize-path": { @@ -17096,9 +17087,9 @@ "dev": true }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -23165,9 +23156,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.2.tgz", + "integrity": "sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg==", "dev": true, "funding": [ { @@ -23184,8 +23175,8 @@ } ], "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.2.0", + "picocolors": "^1.1.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -23360,21 +23351,20 @@ "dev": true }, "node_modules/webpack": { - "version": "5.91.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.91.0.tgz", - "integrity": "sha512-rzVwlLeBWHJbmgTC/8TvAcu5vpJNII+MelQpylD4jNERPwpBJOE2lEcko1zJX3QJeLjTTAnQxn/OJ8bjDzVQaw==", + "version": "5.97.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.97.1.tgz", + "integrity": "sha512-EksG6gFY3L1eFMROS/7Wzgrii5mBAFe4rIr3r2BTfo7bcc+DWwFZ4OJ/miOuHJO/A85HwyI4eQ0F6IKXesO7Fg==", "dev": true, "dependencies": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^1.0.5", - "@webassemblyjs/ast": "^1.12.1", - "@webassemblyjs/wasm-edit": "^1.12.1", - "@webassemblyjs/wasm-parser": "^1.12.1", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.9.0", - "browserslist": "^4.21.10", + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.14.0", + 
"browserslist": "^4.24.0", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.16.0", + "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", From 3a5bf37d4bfcb8f19f6b22e0ea89959300f7ac6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:51:12 +0000 Subject: [PATCH 1271/1397] chore(deps): bump github.com/envoyproxy/go-control-plane (#15773) Bumps [github.com/envoyproxy/go-control-plane](https://github.com/envoyproxy/go-control-plane) from 0.13.1 to 0.13.2. - [Release notes](https://github.com/envoyproxy/go-control-plane/releases) - [Changelog](https://github.com/envoyproxy/go-control-plane/blob/main/CHANGELOG.md) - [Commits](https://github.com/envoyproxy/go-control-plane/compare/v0.13.1...v0.13.2) --- updated-dependencies: - dependency-name: github.com/envoyproxy/go-control-plane dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: Arve Knudsen Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 98075a56fa..5065092f2f 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/digitalocean/godo v1.132.0 github.com/docker/docker v27.4.1+incompatible github.com/edsrzf/mmap-go v1.2.0 - github.com/envoyproxy/go-control-plane v0.13.1 + github.com/envoyproxy/go-control-plane/envoy v1.32.2 github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.8.0 diff --git a/go.sum b/go.sum index 009b714060..8dafac6b17 100644 --- a/go.sum +++ b/go.sum @@ -110,8 +110,8 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= -github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= +github.com/envoyproxy/go-control-plane/envoy v1.32.2 h1:zidqwmijfcbyKqVxjQDFx042PgX+p9U+/fu/f9VtSk8= +github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= From 9e14669c49a05cc1c814c9a9535d0c451f88e963 
Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:57:29 +0000 Subject: [PATCH 1272/1397] chore(deps): bump golang.org/x/net (#15841) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.32.0 to 0.33.0. - [Commits](https://github.com/golang/net/compare/v0.32.0...v0.33.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- documentation/examples/remote_storage/go.mod | 2 +- documentation/examples/remote_storage/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index c1fb2e3564..643fbc901c 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -56,7 +56,7 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/net v0.32.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index c360e28e76..23ac21c12c 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -344,8 +344,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.32.0 
h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= From 5bf7c3718bd08a0b3598badd237b5b91949fc1ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:59:06 +0000 Subject: [PATCH 1273/1397] chore(deps): bump github.com/hashicorp/consul/api from 1.30.0 to 1.31.0 (#15766) Bumps [github.com/hashicorp/consul/api](https://github.com/hashicorp/consul) from 1.30.0 to 1.31.0. - [Release notes](https://github.com/hashicorp/consul/releases) - [Changelog](https://github.com/hashicorp/consul/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/consul/compare/api/v1.30.0...api/v1.31.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/consul/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5065092f2f..3eaca8f35d 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/gophercloud/gophercloud v1.14.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.30.0 + github.com/hashicorp/consul/api v1.31.0 github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec github.com/hetznercloud/hcloud-go/v2 v2.17.1 github.com/ionos-cloud/sdk-go/v6 v6.3.0 diff --git a/go.sum b/go.sum index 8dafac6b17..d26f4079c5 100644 --- a/go.sum +++ b/go.sum @@ -227,8 +227,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= -github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= -github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= +github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ= +github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= From f405c78e4abebda5e7a462754f9d0abc4b5e222d Mon Sep 17 00:00:00 2001 From: Owen Williams Date: Fri, 17 Jan 2025 12:00:30 -0500 Subject: [PATCH 1274/1397] docs: 
clarify PromQL interval changes (#15824) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit docs: Clarify PromQL interval changes --------- Signed-off-by: Owen Williams Signed-off-by: Owen Williams Co-authored-by: Björn Rabenstein --- docs/migration.md | 87 +++++++++++++++++++++++++++++++---------------- 1 file changed, 57 insertions(+), 30 deletions(-) diff --git a/docs/migration.md b/docs/migration.md index e2d53472f3..34dae93e85 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -60,36 +60,63 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 ## PromQL -- The `.` pattern in regular expressions in PromQL matches newline characters. - With this change a regular expressions like `.*` matches strings that include - `\n`. This applies to matchers in queries and relabel configs. - - For example, the following regular expressions now match the accompanying - strings, whereas in Prometheus v2 these combinations didn't match. - - `.*` additionally matches `foo\n` and `Foo\nBar` - - `foo.?bar` additionally matches `foo\nbar` - - `foo.+bar` additionally matches `foo\nbar` - - If you want Prometheus v3 to behave like v2, you will have to change your - regular expressions by replacing all `.` patterns with `[^\n]`, e.g. - `foo[^\n]*`. -- Lookback and range selectors are left open and right closed (previously left - closed and right closed). This change affects queries when the evaluation time - perfectly aligns with the sample timestamps. For example assume querying a - timeseries with evenly spaced samples exactly 1 minute apart. Before Prometheus - v3, a range query with `5m` would usually return 5 samples. But if the query - evaluation aligns perfectly with a scrape, it would return 6 samples. In - Prometheus v3 queries like this will always return 5 samples. - This change has likely few effects for everyday use, except for some subquery - use cases. 
- Query front-ends that align queries usually align subqueries to multiples of - the step size. These subqueries will likely be affected. - Tests are more likely to affected. To fix those either adjust the expected - number of samples or extend the range by less than one sample interval. -- The `holt_winters` function has been renamed to `double_exponential_smoothing` - and is now guarded by the `promql-experimental-functions` feature flag. - If you want to keep using `holt_winters`, you have to do both of these things: - - Rename `holt_winters` to `double_exponential_smoothing` in your queries. - - Pass `--enable-feature=promql-experimental-functions` in your Prometheus - CLI invocation. +### Regular expressions match newlines + +The `.` pattern in regular expressions in PromQL matches newline characters. +With this change a regular expressions like `.*` matches strings that include +`\n`. This applies to matchers in queries and relabel configs. + +For example, the following regular expressions now match the accompanying +strings, whereas in Prometheus v2 these combinations didn't match. + - `.*` additionally matches `foo\n` and `Foo\nBar` + - `foo.?bar` additionally matches `foo\nbar` + - `foo.+bar` additionally matches `foo\nbar` + +If you want Prometheus v3 to behave like v2, you will have to change your +regular expressions by replacing all `.` patterns with `[^\n]`, e.g. +`foo[^\n]*`. + +### Range selectors and lookback exclude samples coinciding with the left boundary + +Lookback and range selectors are now left-open and right-closed (previously +left-closed and right-closed), which makes their behavior more consistent. This +change affects queries where the left boundary of a range or the lookback delta +coincides with the timestamp of one or more samples. + +For example, assume we are querying a timeseries with evenly spaced samples +exactly 1 minute apart. Before Prometheus v3, a range query with `5m` would +usually return 5 samples. 
But if the query evaluation aligns perfectly with a +scrape, it would return 6 samples. In Prometheus v3 queries like this will +always return 5 samples given even spacing. + +This change will typically affect subqueries because their evaluation timing is +naturally perfectly evenly spaced and aligned with timestamps that are multiples +of the subquery resolution. Furthermore, query frontends often align subqueries +to multiples of the step size. In combination, this easily creates a situation +of perfect mutual alignment, often unintended and unknown by the user, so that +the new behavior might come as a surprise. Before Prometheus V3, a subquery of +`foo[1m:1m]` on such a system might have always returned two points, allowing +for rate calculations. In Prometheus V3, however, such a subquery will only +return one point, which is insufficient for a rate or increase calculation, +resulting in No Data returned. + +Such queries will need to be rewritten to extend the window to properly cover +more than one point. In this example, `foo[2m:1m]` would always return two +points no matter the query alignment. The exact form of the rewritten query may +depend on the intended results and there is no universal drop-in replacement for +queries whose behavior has changed. + +Tests are similarly more likely to affected. To fix those either adjust the +expected number of samples or extend the range. + +### holt_winters function renamed + +The `holt_winters` function has been renamed to `double_exponential_smoothing` +and is now guarded by the `promql-experimental-functions` feature flag. +If you want to keep using `holt_winters`, you have to do both of these things: + - Rename `holt_winters` to `double_exponential_smoothing` in your queries. + - Pass `--enable-feature=promql-experimental-functions` in your Prometheus + CLI invocation. 
## Scrape protocols Prometheus v3 is more strict concerning the Content-Type header received when From c47465c0c529ee8c2f6aac979ebd0b4f55b1a891 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 17:24:26 +0000 Subject: [PATCH 1275/1397] chore(deps): bump nanoid from 3.3.7 to 3.3.8 in /web/ui (#15844) Bumps [nanoid](https://github.com/ai/nanoid) from 3.3.7 to 3.3.8. - [Release notes](https://github.com/ai/nanoid/releases) - [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md) - [Commits](https://github.com/ai/nanoid/compare/3.3.7...3.3.8) --- updated-dependencies: - dependency-name: nanoid dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/package-lock.json | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index f9beb96da0..07b35914d3 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -7080,16 +7080,15 @@ "license": "MIT" }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, From 5e2b75ee5e8da0029362faa4bead50f1fdd05a24 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 18:16:30 +0000 Subject: [PATCH 1276/1397] chore(deps-dev): bump @typescript-eslint/parser from 6.21.0 to 8.20.0 in /web/ui (#15821) * 
chore(deps-dev): bump @typescript-eslint/parser in /web/ui Bumps [@typescript-eslint/parser](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/parser) from 6.21.0 to 8.20.0. - [Release notes](https://github.com/typescript-eslint/typescript-eslint/releases) - [Changelog](https://github.com/typescript-eslint/typescript-eslint/blob/main/packages/parser/CHANGELOG.md) - [Commits](https://github.com/typescript-eslint/typescript-eslint/commits/v8.20.0/packages/parser) --- updated-dependencies: - dependency-name: "@typescript-eslint/parser" dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: Arve Knudsen * Fix lint errors/warnings Signed-off-by: Arve Knudsen --------- Signed-off-by: dependabot[bot] Signed-off-by: Arve Knudsen Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Arve Knudsen --- web/ui/mantine-ui/package.json | 4 +- .../src/components/EndpointLink.tsx | 2 +- .../src/components/ReadinessWrapper.tsx | 2 +- web/ui/mantine-ui/src/pages/query/Graph.tsx | 4 +- .../mantine-ui/src/pages/query/TableTab.tsx | 4 +- .../mantine-ui/src/pages/query/TreeNode.tsx | 16 +- .../src/pages/query/uPlotStackHelpers.ts | 7 +- .../ServiceDiscoveryPage.tsx | 4 +- .../src/pages/targets/TargetsPage.tsx | 4 +- web/ui/package-lock.json | 306 +++++++----------- web/ui/package.json | 4 +- .../src/pages/graph/DataTable.test.tsx | 2 +- 12 files changed, 141 insertions(+), 218 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 9cdb67aea5..9d4afdfae5 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -56,8 +56,8 @@ "@eslint/js": "^9.17.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", - "@typescript-eslint/eslint-plugin": "^6.21.0", - "@typescript-eslint/parser": "^6.21.0", + "@typescript-eslint/eslint-plugin": "^8.20.0", + "@typescript-eslint/parser": 
"^8.20.0", "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.18.0", "eslint-plugin-react-hooks": "^5.1.0", diff --git a/web/ui/mantine-ui/src/components/EndpointLink.tsx b/web/ui/mantine-ui/src/components/EndpointLink.tsx index c9b6a8989c..7e56a71abe 100644 --- a/web/ui/mantine-ui/src/components/EndpointLink.tsx +++ b/web/ui/mantine-ui/src/components/EndpointLink.tsx @@ -12,7 +12,7 @@ const EndpointLink: FC = ({ endpoint, globalUrl }) => { let invalidURL = false; try { url = new URL(endpoint); - } catch (err: unknown) { + } catch (_: unknown) { // In cases of IPv6 addresses with a Zone ID, URL may not be parseable. // See https://github.com/prometheus/prometheus/issues/9760 // In this case, we attempt to prepare a synthetic URL with the diff --git a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx index 2e471de5e3..3be002e258 100644 --- a/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx +++ b/web/ui/mantine-ui/src/components/ReadinessWrapper.tsx @@ -48,7 +48,7 @@ const ReadinessLoader: FC = () => { default: throw new Error(res.statusText); } - } catch (error) { + } catch (_) { throw new Error("Unexpected error while fetching ready status"); } }, diff --git a/web/ui/mantine-ui/src/pages/query/Graph.tsx b/web/ui/mantine-ui/src/pages/query/Graph.tsx index c25d5a98eb..b461c4ff06 100644 --- a/web/ui/mantine-ui/src/pages/query/Graph.tsx +++ b/web/ui/mantine-ui/src/pages/query/Graph.tsx @@ -102,7 +102,9 @@ const Graph: FC = ({ // Re-execute the query when the user presses Enter (or hits the Execute button). 
useEffect(() => { - effectiveExpr !== "" && refetch(); + if (effectiveExpr !== "") { + refetch(); + } }, [retriggerIdx, refetch, effectiveExpr, endTime, range, resolution]); // The useElementSize hook above only gets a valid size on the second render, so this diff --git a/web/ui/mantine-ui/src/pages/query/TableTab.tsx b/web/ui/mantine-ui/src/pages/query/TableTab.tsx index cab93e229b..5e2f804243 100644 --- a/web/ui/mantine-ui/src/pages/query/TableTab.tsx +++ b/web/ui/mantine-ui/src/pages/query/TableTab.tsx @@ -40,7 +40,9 @@ const TableTab: FC = ({ panelIdx, retriggerIdx, expr }) => { }); useEffect(() => { - expr !== "" && refetch(); + if (expr !== "") { + refetch(); + } }, [retriggerIdx, refetch, expr, endTime]); useLayoutEffect(() => { diff --git a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx index 515e9baadf..25ffdce650 100644 --- a/web/ui/mantine-ui/src/pages/query/TreeNode.tsx +++ b/web/ui/mantine-ui/src/pages/query/TreeNode.tsx @@ -150,20 +150,20 @@ const TreeNode: FC<{ }); useEffect(() => { - if (mergedChildState === "error") { - reportNodeState && reportNodeState(childIdx, "error"); + if (mergedChildState === "error" && reportNodeState) { + reportNodeState(childIdx, "error"); } }, [mergedChildState, reportNodeState, childIdx]); useEffect(() => { - if (error) { - reportNodeState && reportNodeState(childIdx, "error"); + if (error && reportNodeState) { + reportNodeState(childIdx, "error"); } }, [error, reportNodeState, childIdx]); useEffect(() => { - if (isFetching) { - reportNodeState && reportNodeState(childIdx, "running"); + if (isFetching && reportNodeState) { + reportNodeState(childIdx, "running"); } }, [isFetching, reportNodeState, childIdx]); @@ -219,7 +219,9 @@ const TreeNode: FC<{ return; } - reportNodeState && reportNodeState(childIdx, "success"); + if (reportNodeState) { + reportNodeState(childIdx, "success"); + } let resultSeries = 0; const labelValuesByName: Record> = {}; diff --git 
a/web/ui/mantine-ui/src/pages/query/uPlotStackHelpers.ts b/web/ui/mantine-ui/src/pages/query/uPlotStackHelpers.ts index ac5d9ce688..ba9379bf69 100644 --- a/web/ui/mantine-ui/src/pages/query/uPlotStackHelpers.ts +++ b/web/ui/mantine-ui/src/pages/query/uPlotStackHelpers.ts @@ -24,10 +24,11 @@ function stack( } for (let i = 1; i < data.length; i++) { - !omit(i) && + if (!omit(i)) { bands.push({ series: [data.findIndex((_s, j) => j > i && !omit(j)), i], }); + } } bands = bands.filter((b) => b.series[1] > -1); @@ -65,7 +66,9 @@ export function setStackedOpts(opts: uPlot.Options, data: uPlot.AlignedData) { if (show) { const pts: number[] = []; data[seriesIdx].forEach((v, i) => { - v != null && pts.push(i); + if (v != null) { + pts.push(i); + } }); return pts; } diff --git a/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPage.tsx b/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPage.tsx index c0dbd1ca11..4bef1c8d92 100644 --- a/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPage.tsx +++ b/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPage.tsx @@ -84,7 +84,9 @@ export default function ServiceDiscoveryPage() { value={(limited && scrapePools[0]) || scrapePool || null} onChange={(value) => { setScrapePool(value); - showLimitAlert && dispatch(setShowLimitAlert(false)); + if (showLimitAlert) { + dispatch(setShowLimitAlert(false)); + } }} searchable /> diff --git a/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx b/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx index 60dae60541..399d1a458d 100644 --- a/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx +++ b/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx @@ -83,7 +83,9 @@ export default function TargetsPage() { value={(limited && scrapePools[0]) || scrapePool || null} onChange={(value) => { setScrapePool(value); - showLimitAlert && dispatch(setShowLimitAlert(false)); + if (showLimitAlert) { + dispatch(setShowLimitAlert(false)); + } }} searchable /> diff 
--git a/web/ui/package-lock.json b/web/ui/package-lock.json index 07b35914d3..569866873d 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -13,8 +13,8 @@ ], "devDependencies": { "@types/jest": "^29.5.14", - "@typescript-eslint/eslint-plugin": "^6.21.0", - "@typescript-eslint/parser": "^6.21.0", + "@typescript-eslint/eslint-plugin": "^8.20.0", + "@typescript-eslint/parser": "^8.20.0", "eslint-config-prettier": "^9.1.0", "prettier": "^3.4.2", "ts-jest": "^29.2.2", @@ -70,8 +70,8 @@ "@eslint/js": "^9.17.0", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", - "@typescript-eslint/eslint-plugin": "^6.21.0", - "@typescript-eslint/parser": "^6.21.0", + "@typescript-eslint/eslint-plugin": "^8.20.0", + "@typescript-eslint/parser": "^8.20.0", "@vitejs/plugin-react": "^4.3.4", "eslint": "^9.18.0", "eslint-plugin-react-hooks": "^5.1.0", @@ -2960,12 +2960,6 @@ "htmlparser2": "^8.0.0" } }, - "node_modules/@types/semver": { - "version": "7.5.8", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", - "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", - "dev": true - }, "node_modules/@types/stack-utils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", @@ -2997,148 +2991,109 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", - "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.20.0.tgz", + "integrity": "sha512-naduuphVw5StFfqp4Gq4WhIBE2gN1GEmMUExpJYknZJdRnc+2gDzB8Z3+5+/Kv33hPQRDGzQO/0opHE72lZZ6A==", "dev": true, "license": "MIT", "dependencies": { - "@eslint-community/regexpp": "^4.5.1", - 
"@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/type-utils": "6.21.0", - "@typescript-eslint/utils": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", - "debug": "^4.3.4", + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.20.0", + "@typescript-eslint/type-utils": "8.20.0", + "@typescript-eslint/utils": "8.20.0", + "@typescript-eslint/visitor-keys": "8.20.0", "graphemer": "^1.4.0", - "ignore": "^5.2.4", + "ignore": "^5.3.1", "natural-compare": "^1.4.0", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" + "ts-api-utils": "^2.0.0" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/type-utils": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", - "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", - "dev": true, - "dependencies": { - "@typescript-eslint/typescript-estree": "6.21.0", - "@typescript-eslint/utils": "6.21.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/utils": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", - "integrity": 
"sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", - "dev": true, - "dependencies": { - "@eslint-community/eslint-utils": "^4.4.0", - "@types/json-schema": "^7.0.12", - "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/typescript-estree": "6.21.0", - "semver": "^7.5.4" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" + "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/parser": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", - "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.20.0.tgz", + "integrity": "sha512-gKXG7A5HMyjDIedBi6bUrDcun8GIjnI8qOwVLiY3rx6T/sHP/19XLJOnIq/FgQvWLHja5JN/LSE7eklNBr612g==", "dev": true, - "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/scope-manager": "6.21.0", - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/typescript-estree": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", + "@typescript-eslint/scope-manager": "8.20.0", + "@typescript-eslint/types": "8.20.0", + "@typescript-eslint/typescript-estree": "8.20.0", + "@typescript-eslint/visitor-keys": "8.20.0", "debug": "^4.3.4" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - 
"optional": true - } + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", - "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.20.0.tgz", + "integrity": "sha512-J7+VkpeGzhOt3FeG1+SzhiMj9NzGD/M6KoGn9f4dbz3YzK9hvbhVTmLj/HiTp9DazIzJ8B4XcM80LrR9Dm1rJw==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0" + "@typescript-eslint/types": "8.20.0", + "@typescript-eslint/visitor-keys": "8.20.0" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.20.0.tgz", + "integrity": "sha512-bPC+j71GGvA7rVNAHAtOjbVXbLN5PkwqMvy1cwGeaxUoRQXVuKCebRoLzm+IPW/NtFFpstn1ummSIasD5t60GA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "8.20.0", + "@typescript-eslint/utils": "8.20.0", + "debug": "^4.3.4", + "ts-api-utils": "^2.0.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" + } + }, "node_modules/@typescript-eslint/types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", - "integrity": 
"sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.20.0.tgz", + "integrity": "sha512-cqaMiY72CkP+2xZRrFt3ExRBu0WmVitN/rYPZErA80mHjHx/Svgp8yfbzkJmDoQ/whcytOPO9/IZXnOc+wigRA==", "dev": true, "license": "MIT", "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", @@ -3146,32 +3101,30 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", - "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.20.0.tgz", + "integrity": "sha512-Y7ncuy78bJqHI35NwzWol8E0X7XkRVS4K4P4TCyzWkOJih5NDvtoRDW4Ba9YJJoB2igm9yXDdYI/+fkiiAxPzA==", "dev": true, - "license": "BSD-2-Clause", + "license": "MIT", "dependencies": { - "@typescript-eslint/types": "6.21.0", - "@typescript-eslint/visitor-keys": "6.21.0", + "@typescript-eslint/types": "8.20.0", + "@typescript-eslint/visitor-keys": "8.20.0", "debug": "^4.3.4", - "globby": "^11.1.0", + "fast-glob": "^3.3.2", "is-glob": "^4.0.3", - "minimatch": "9.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.0.0" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "peerDependencies": { + "typescript": ">=4.8.4 <5.8.0" } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { @@ -3185,9 +3138,9 @@ } }, 
"node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "dev": true, "license": "ISC", "dependencies": { @@ -3200,35 +3153,46 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", - "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "node_modules/@typescript-eslint/utils": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.20.0.tgz", + "integrity": "sha512-dq70RUw6UK9ei7vxc4KQtBRk7qkHZv447OUZ6RPQMQl71I3NZxQJX/f32Smr+iqWrB02pHKn2yAdHBb0KNrRMA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "6.21.0", - "eslint-visitor-keys": "^3.4.1" + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.20.0", + "@typescript-eslint/types": "8.20.0", + "@typescript-eslint/typescript-estree": "8.20.0" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <5.8.0" } }, - "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": 
"sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.20.0.tgz", + "integrity": "sha512-v/BpkeeYAsPkKCkR8BDwcno0llhzWVqPOamQrAEMdpZav2Y9OVjd9dwJyBLJWwf335B5DmlifECIkZRJCaGaHA==", "dev": true, - "license": "Apache-2.0", + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.20.0", + "eslint-visitor-keys": "^4.2.0" + }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://opencollective.com/eslint" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, "node_modules/@uiw/codemirror-extensions-basic-setup": { @@ -3534,16 +3498,6 @@ "dequal": "^2.0.3" } }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/assertion-error": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", @@ -4261,19 +4215,6 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/doctrine": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", @@ -5259,27 +5200,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, 
- "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "license": "MIT", - "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", @@ -7398,16 +7318,6 @@ "dev": true, "license": "MIT" }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/pathe": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", @@ -8770,16 +8680,16 @@ } }, "node_modules/ts-api-utils": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", - "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.0.tgz", + "integrity": "sha512-xCt/TOAc+EOHS1XPnijD3/yzpH6qg2xppZO1YDqGoVsNXfQfzHpOdNuXwrwOU8u4ITXJyDCTyt8w5g1sZv9ynQ==", "dev": true, "license": "MIT", "engines": { - "node": ">=16" + "node": ">=18.12" }, "peerDependencies": { - "typescript": ">=4.2.0" + "typescript": ">=4.8.4" } }, "node_modules/ts-jest": { diff --git a/web/ui/package.json b/web/ui/package.json index 10ba19538f..8539057eeb 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -16,8 
+16,8 @@ ], "devDependencies": { "@types/jest": "^29.5.14", - "@typescript-eslint/eslint-plugin": "^6.21.0", - "@typescript-eslint/parser": "^6.21.0", + "@typescript-eslint/eslint-plugin": "^8.20.0", + "@typescript-eslint/parser": "^8.20.0", "eslint-config-prettier": "^9.1.0", "prettier": "^3.4.2", "ts-jest": "^29.2.2", diff --git a/web/ui/react-app/src/pages/graph/DataTable.test.tsx b/web/ui/react-app/src/pages/graph/DataTable.test.tsx index dbc1b18b8e..cf9b3a5331 100755 --- a/web/ui/react-app/src/pages/graph/DataTable.test.tsx +++ b/web/ui/react-app/src/pages/graph/DataTable.test.tsx @@ -1,5 +1,5 @@ import * as React from 'react'; -import { mount, shallow } from 'enzyme'; +import { shallow } from 'enzyme'; import DataTable, { DataTableProps } from './DataTable'; import { Alert, Table } from 'reactstrap'; import SeriesName from './SeriesName'; From 2f42bf2376bbfaed5427eb0708d64f2b0e002f15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 18:57:39 +0000 Subject: [PATCH 1277/1397] chore(deps): bump react-dom and @types/react-dom in /web/ui (#15661) * chore(deps): bump react, react-dom and @types/react in /web/ui Bumps [react](https://github.com/facebook/react/tree/HEAD/packages/react), [react-dom](https://github.com/facebook/react/tree/HEAD/packages/react-dom) and [@types/react](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react). These dependencies needed to be updated together. 
Updates `react` from 18.3.1 to 19.0.0 - [Release notes](https://github.com/facebook/react/releases) - [Changelog](https://github.com/facebook/react/blob/main/CHANGELOG.md) - [Commits](https://github.com/facebook/react/commits/v19.0.0/packages/react) Updates `react-dom` from 18.3.1 to 19.0.0 - [Release notes](https://github.com/facebook/react/releases) - [Changelog](https://github.com/facebook/react/blob/main/CHANGELOG.md) - [Commits](https://github.com/facebook/react/commits/v19.0.0/packages/react-dom) Updates `@types/react` from 18.3.5 to 19.0.6 - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/react) --- updated-dependencies: - dependency-name: react dependency-type: direct:production update-type: version-update:semver-major - dependency-name: react-dom dependency-type: direct:production update-type: version-update:semver-major - dependency-name: "@types/react" dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: Arve Knudsen * chore(deps): bump react-dom and @types/react-dom in /web/ui Bumps [react-dom](https://github.com/facebook/react/tree/HEAD/packages/react-dom) and [@types/react-dom](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react-dom). These dependencies needed to be updated together. 
Updates `react-dom` from 18.3.1 to 19.0.0 - [Release notes](https://github.com/facebook/react/releases) - [Changelog](https://github.com/facebook/react/blob/main/CHANGELOG.md) - [Commits](https://github.com/facebook/react/commits/v19.0.0/packages/react-dom) Updates `@types/react-dom` from 18.3.0 to 19.0.2 - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/react-dom) --- updated-dependencies: - dependency-name: react-dom dependency-type: direct:production update-type: version-update:semver-major - dependency-name: "@types/react-dom" dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Fix up Signed-off-by: Arve Knudsen --------- Signed-off-by: dependabot[bot] Signed-off-by: Arve Knudsen Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Arve Knudsen --- web/ui/mantine-ui/package.json | 8 +-- web/ui/mantine-ui/src/promql/format.tsx | 2 +- web/ui/package-lock.json | 73 ++++++++----------------- 3 files changed, 29 insertions(+), 54 deletions(-) diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 9d4afdfae5..54546c76ad 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -40,8 +40,8 @@ "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", - "react": "^18.3.1", - "react-dom": "^18.3.1", + "react": "^19.0.0", + "react-dom": "^19.0.0", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.2.0", "react-router-dom": "^7.1.1", @@ -54,8 +54,8 @@ "@eslint/compat": "^1.2.4", "@eslint/eslintrc": "^3.2.0", "@eslint/js": "^9.17.0", - "@types/react": "^18.3.5", - "@types/react-dom": "^18.3.0", + "@types/react": "^19.0.6", + "@types/react-dom": "^19.0.0", "@typescript-eslint/eslint-plugin": "^8.20.0", "@typescript-eslint/parser": "^8.20.0", "@vitejs/plugin-react": "^4.3.4", diff --git 
a/web/ui/mantine-ui/src/promql/format.tsx b/web/ui/mantine-ui/src/promql/format.tsx index 3996444085..47cd259bd8 100644 --- a/web/ui/mantine-ui/src/promql/format.tsx +++ b/web/ui/mantine-ui/src/promql/format.tsx @@ -1,4 +1,4 @@ -import React, { ReactElement, ReactNode } from "react"; +import React, { ReactElement, ReactNode, JSX } from "react"; import ASTNode, { VectorSelector, matchType, diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 569866873d..dd251c660e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -54,8 +54,8 @@ "clsx": "^2.1.1", "dayjs": "^1.11.10", "lodash": "^4.17.21", - "react": "^18.3.1", - "react-dom": "^18.3.1", + "react": "^19.0.0", + "react-dom": "^19.0.0", "react-infinite-scroll-component": "^6.1.0", "react-redux": "^9.2.0", "react-router-dom": "^7.1.1", @@ -68,8 +68,8 @@ "@eslint/compat": "^1.2.4", "@eslint/eslintrc": "^3.2.0", "@eslint/js": "^9.17.0", - "@types/react": "^18.3.5", - "@types/react-dom": "^18.3.0", + "@types/react": "^19.0.6", + "@types/react-dom": "^19.0.0", "@typescript-eslint/eslint-plugin": "^8.20.0", "@typescript-eslint/parser": "^8.20.0", "@vitejs/plugin-react": "^4.3.4", @@ -2916,32 +2916,23 @@ "undici-types": "~6.19.2" } }, - "node_modules/@types/prop-types": { - "version": "15.7.12", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", - "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==", - "devOptional": true, - "license": "MIT" - }, "node_modules/@types/react": { - "version": "18.3.5", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.5.tgz", - "integrity": "sha512-WeqMfGJLGuLCqHGYRGHxnKrXcTitc6L/nBUWfWPcTarG3t9PsquqUMuVeXZeca+mglY4Vo5GZjCi0A3Or2lnxA==", + "version": "19.0.6", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.6.tgz", + "integrity": "sha512-gIlMztcTeDgXCUj0vCBOqEuSEhX//63fW9SZtCJ+agxoQTOklwDfiEMlTWn4mR/C/UK5VHlpwsCsOyf7/hc4lw==", 
"devOptional": true, - "license": "MIT", "dependencies": { - "@types/prop-types": "*", "csstype": "^3.0.2" } }, "node_modules/@types/react-dom": { - "version": "18.3.0", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", - "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", + "version": "19.0.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.0.3.tgz", + "integrity": "sha512-0Knk+HJiMP/qOZgMyNFamlIjw9OFCsyC2ZbigmEEyXXixgre6IQpm/4V+r3qH4GC1JPvRJKInw+on2rV6YZLeA==", "devOptional": true, "license": "MIT", - "dependencies": { - "@types/react": "*" + "peerDependencies": { + "@types/react": "^19.0.0" } }, "node_modules/@types/resolve": { @@ -4239,7 +4230,6 @@ "version": "5.2.1", "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz", "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==", - "license": "MIT", "dependencies": { "@babel/runtime": "^7.8.7", "csstype": "^3.0.2" @@ -6825,7 +6815,6 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "license": "MIT", "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, @@ -7134,7 +7123,6 @@ "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -7667,7 +7655,6 @@ "version": "15.8.1", "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "license": "MIT", "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", 
@@ -7677,8 +7664,7 @@ "node_modules/prop-types/node_modules/react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", - "license": "MIT" + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/propagate": { "version": "2.0.1", @@ -7740,28 +7726,22 @@ "license": "MIT" }, "node_modules/react": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", - "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0" - }, + "version": "19.0.0", + "resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz", + "integrity": "sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==", "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", - "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", - "license": "MIT", + "version": "19.0.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.0.0.tgz", + "integrity": "sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ==", "dependencies": { - "loose-envify": "^1.1.0", - "scheduler": "^0.23.2" + "scheduler": "^0.25.0" }, "peerDependencies": { - "react": "^18.3.1" + "react": "^19.0.0" } }, "node_modules/react-infinite-scroll-component": { @@ -7949,7 +7929,6 @@ "version": "4.4.5", "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz", "integrity": 
"sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==", - "license": "BSD-3-Clause", "dependencies": { "@babel/runtime": "^7.5.5", "dom-helpers": "^5.0.1", @@ -8206,13 +8185,9 @@ } }, "node_modules/scheduler": { - "version": "0.23.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", - "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", - "license": "MIT", - "dependencies": { - "loose-envify": "^1.1.0" - } + "version": "0.25.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.25.0.tgz", + "integrity": "sha512-xFVuu11jh+xcO7JOAGJNOXld8/TcEHK/4CituBUeUb5hqxJLj9YuemAEuvm9gQ/+pgXYfbQuqAkiYu+u7YEsNA==" }, "node_modules/semver": { "version": "7.6.3", From a82f2b8168d7670d13f209be09f50fe9f919414b Mon Sep 17 00:00:00 2001 From: piguagua Date: Sat, 18 Jan 2025 04:26:08 +0800 Subject: [PATCH 1278/1397] chore: fix function name and struct name in comment (#15827) Signed-off-by: piguagua --- tsdb/record/record_test.go | 2 +- web/api/v1/errors_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index dc625f0830..9b2eb89c5a 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -476,7 +476,7 @@ type recordsMaker struct { make refsCreateFn } -// BenchmarkWAL_HistogramLog measures efficiency of encoding classic +// BenchmarkWAL_HistogramEncoding measures efficiency of encoding classic // histograms and native historgrams with custom buckets (NHCB). 
func BenchmarkWAL_HistogramEncoding(b *testing.B) { initClassicRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) { diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 6ff0614c4c..1cc90a4b62 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -217,7 +217,7 @@ func (t errorTestSeriesSet) Warnings() annotations.Annotations { return nil } -// DummyTargetRetriever implements github.com/prometheus/prometheus/web/api/v1.ScrapePoolsRetriever. +// DummyScrapePoolsRetriever implements github.com/prometheus/prometheus/web/api/v1.ScrapePoolsRetriever. type DummyScrapePoolsRetriever struct{} func (DummyScrapePoolsRetriever) ScrapePools() []string { From 9258e40589d971c0a009dccaa13462b87fb9454a Mon Sep 17 00:00:00 2001 From: Mikel Olasagasti Uranga Date: Sat, 18 Jan 2025 13:21:51 +0100 Subject: [PATCH 1279/1397] parser: fix non-constant format string call (#15835) Go 1.24 enhanced vet's printf analyzer to report calls of the form fmt.Printf(s), where s is a non-constant format string, with no other arguments. This change makes parser tests to fail. 
Signed-off-by: Mikel Olasagasti Uranga --- promql/parser/generated_parser.y | 2 +- promql/parser/generated_parser.y.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index ca710b1ab0..cdb4532d3b 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -488,7 +488,7 @@ matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET if errMsg != ""{ errRange := mergeRanges(&$2, &$4) - yylex.(*parser).addParseErrf(errRange, errMsg) + yylex.(*parser).addParseErrf(errRange, "%s", errMsg) } numLit, _ := $3.(*NumberLiteral) diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 04bc081f2f..78d5e15245 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -1385,7 +1385,7 @@ yydefault: if errMsg != "" { errRange := mergeRanges(&yyDollar[2].item, &yyDollar[4].item) - yylex.(*parser).addParseErrf(errRange, errMsg) + yylex.(*parser).addParseErrf(errRange, "%s", errMsg) } numLit, _ := yyDollar[3].node.(*NumberLiteral) From 2a8ae586f4f17e8616b02b40909abc3dde3733b3 Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Mon, 20 Jan 2025 21:26:58 +0100 Subject: [PATCH 1280/1397] ruler: stop all rule groups asynchronously on shutdown (#15804) * ruler: stop all rule groups asynchronously on shutdown During shutdown of the rules manager some rule groups have already stopped and are missing evaluations while we're waiting for other groups to finish their evaluation. When there are many groups (in the thousands), the whole shutdown process can take up to 10 minutes, during which we get miss evaluations. 
Signed-off-by: Dimitar Dimitrov * Use wrappers in stop(); rename awaitStopped() Signed-off-by: Dimitar Dimitrov * Add comment Signed-off-by: Dimitar Dimitrov --------- Signed-off-by: Dimitar Dimitrov --- rules/group.go | 10 +++++++++- rules/manager.go | 8 +++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/rules/group.go b/rules/group.go index 4398d9211d..9ad9aab093 100644 --- a/rules/group.go +++ b/rules/group.go @@ -302,11 +302,19 @@ func (g *Group) run(ctx context.Context) { } } -func (g *Group) stop() { +func (g *Group) stopAsync() { close(g.done) +} + +func (g *Group) waitStopped() { <-g.terminated } +func (g *Group) stop() { + g.stopAsync() + g.waitStopped() +} + func (g *Group) hash() uint64 { l := labels.New( labels.Label{Name: "name", Value: g.name}, diff --git a/rules/manager.go b/rules/manager.go index 50b2a7e99d..b1d3e8e3d6 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -188,8 +188,14 @@ func (m *Manager) Stop() { m.logger.Info("Stopping rule manager...") + // Stop all groups asynchronously, then wait for them to finish. + // This is faster than stopping and waiting for each group in sequence. 
for _, eg := range m.groups { - eg.stop() + eg.stopAsync() + } + + for _, eg := range m.groups { + eg.waitStopped() } // Shut down the groups waiting multiple evaluation intervals to write From e8fab32ca289a6bbdafd7cc9739b349e565d613c Mon Sep 17 00:00:00 2001 From: Paulo Dias <44772900+paulojmdias@users.noreply.github.com> Date: Tue, 21 Jan 2025 10:40:15 +0000 Subject: [PATCH 1281/1397] discovery: move openstack floating ips function from deprecated Compute API /os-floating-ips to Network API /floatingips (#14367) --- discovery/openstack/instance.go | 56 +++- discovery/openstack/instance_test.go | 28 +- discovery/openstack/mock_test.go | 471 +++++++++++++++++++++------ 3 files changed, 446 insertions(+), 109 deletions(-) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 2a9e29f2ef..f25c19badb 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -22,8 +22,9 @@ import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/pagination" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" @@ -72,8 +73,8 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou } type floatingIPKey struct { - id string - fixed string + deviceID string + fixed string } func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { @@ -90,9 +91,33 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, return nil, fmt.Errorf("could not create OpenStack compute session: %w", err) } + networkClient, err := openstack.NewNetworkV2(i.provider, 
gophercloud.EndpointOpts{ + Region: i.region, Availability: i.availability, + }) + if err != nil { + return nil, fmt.Errorf("could not create OpenStack network session: %w", err) + } + // OpenStack API reference - // https://developer.openstack.org/api-ref/compute/#list-floating-ips - pagerFIP := floatingips.List(client) + // https://docs.openstack.org/api-ref/network/v2/index.html#list-ports + portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages() + if err != nil { + return nil, fmt.Errorf("failed to list all ports: %w", err) + } + + allPorts, err := ports.ExtractPorts(portPages) + if err != nil { + return nil, fmt.Errorf("failed to extract Ports: %w", err) + } + + portList := make(map[string]string) + for _, port := range allPorts { + portList[port.ID] = port.DeviceID + } + + // OpenStack API reference + // https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ips + pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{}) floatingIPList := make(map[floatingIPKey]string) floatingIPPresent := make(map[string]struct{}) err = pagerFIP.EachPage(func(page pagination.Page) (bool, error) { @@ -102,11 +127,24 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } for _, ip := range result { // Skip not associated ips - if ip.InstanceID == "" || ip.FixedIP == "" { + if ip.PortID == "" || ip.FixedIP == "" { continue } - floatingIPList[floatingIPKey{id: ip.InstanceID, fixed: ip.FixedIP}] = ip.IP - floatingIPPresent[ip.IP] = struct{}{} + + // Fetch deviceID from portList + deviceID, ok := portList[ip.PortID] + if !ok { + i.logger.Warn("Floating IP PortID not found in portList", "PortID", ip.PortID) + continue + } + + key := floatingIPKey{ + deviceID: deviceID, + fixed: ip.FixedIP, + } + + floatingIPList[key] = ip.FloatingIP + floatingIPPresent[ip.FloatingIP] = struct{}{} } return true, nil }) @@ -198,7 +236,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } 
lbls[openstackLabelAddressPool] = model.LabelValue(pool) lbls[openstackLabelPrivateIP] = model.LabelValue(addr) - if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok { + if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok { lbls[openstackLabelPublicIP] = model.LabelValue(val) } addr = net.JoinHostPort(addr, strconv.Itoa(i.port)) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index 2617baa4e3..0933b57067 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -32,6 +32,7 @@ func (s *OpenstackSDInstanceTestSuite) SetupTest(t *testing.T) { s.Mock.HandleServerListSuccessfully() s.Mock.HandleFloatingIPListSuccessfully() + s.Mock.HandlePortsListSuccessfully() s.Mock.HandleVersionsSuccessfully() s.Mock.HandleAuthSuccessfully() @@ -66,7 +67,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 4) + require.Len(t, tg.Targets, 6) for i, lbls := range []model.LabelSet{ { @@ -119,6 +120,31 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"), "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), }, + { + "__address__": model.LabelValue("10.0.0.33:0"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), + "__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"), + "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), + "__meta_openstack_instance_name": model.LabelValue("merp-project2"), + "__meta_openstack_private_ip": model.LabelValue("10.0.0.33"), + "__meta_openstack_address_pool": model.LabelValue("private"), + "__meta_openstack_tag_env": model.LabelValue("prod"), + "__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"), + "__meta_openstack_user_id": 
model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), + }, + { + "__address__": model.LabelValue("10.0.0.34:0"), + "__meta_openstack_instance_flavor": model.LabelValue("m1.small"), + "__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"), + "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), + "__meta_openstack_instance_name": model.LabelValue("merp-project2"), + "__meta_openstack_private_ip": model.LabelValue("10.0.0.34"), + "__meta_openstack_address_pool": model.LabelValue("private"), + "__meta_openstack_tag_env": model.LabelValue("prod"), + "__meta_openstack_public_ip": model.LabelValue("10.10.10.24"), + "__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"), + "__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"), + }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index 4518f41166..2d12dbc0df 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -124,7 +124,7 @@ func (m *SDMock) HandleAuthSuccessfully() { "type": "identity", "name": "keystone" }, - { + { "endpoints": [ { "id": "e2ffee808abc4a60916715b1d4b489dd", @@ -136,8 +136,20 @@ func (m *SDMock) HandleAuthSuccessfully() { ], "id": "b7f2a5b1a019459cb956e43a8cb41e31", "type": "compute" - } - + }, + { + "endpoints": [ + { + "id": "5448e46679564d7d95466c2bef54c296", + "interface": "public", + "region": "RegionOne", + "region_id": "RegionOne", + "url": "%s" + } + ], + "id": "589f3d99a3d94f5f871e9f5cf206d2e8", + "type": "network" + } ], "expires_at": "2013-02-27T18:30:59.999999Z", "is_domain": false, @@ -174,7 +186,7 @@ func (m *SDMock) HandleAuthSuccessfully() { } } } - `, m.Endpoint()) + `, m.Endpoint(), m.Endpoint()) }) } @@ -461,82 +473,159 @@ const serverListBody = ` "metadata": {} }, { - "status": "ACTIVE", - "updated": "2014-09-25T13:04:49Z", - 
"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - "OS-EXT-SRV-ATTR:host": "devstack", - "addresses": { - "private": [ - { - "version": 4, - "addr": "10.0.0.33", - "OS-EXT-IPS:type": "fixed" - }, - { - "version": 4, - "addr": "10.0.0.34", - "OS-EXT-IPS:type": "fixed" - }, - { - "version": 4, - "addr": "10.10.10.4", - "OS-EXT-IPS:type": "floating" - } - ] - }, - "links": [ - { - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "self" + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "version": 4, + "addr": "10.0.0.33", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.0.0.34", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.10.10.4", + "OS-EXT-IPS:type": "floating" + } + ] }, - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "bookmark" - } - ], - "key_name": null, - "image": "", - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", - "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", - "flavor": { - "vcpus": 2, - "ram": 4096, - "disk": 0, - "ephemeral": 0, - "swap": 0, - "original_name": "m1.small", - "extra_specs": { - "aggregate_instance_extra_specs:general": "true", - "hw:mem_page_size": "large", - "hw:vif_multiqueue_enabled": "true" + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + 
"image": "", + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "merp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": { + "env": "prod" } }, - "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", - "security_groups": [ - { - "name": "default" + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "version": 4, + "addr": "10.0.0.33", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.0.0.34", + "OS-EXT-IPS:type": "fixed" + }, + { + "version": 4, + "addr": "10.10.10.24", + "OS-EXT-IPS:type": "floating" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": "", + "OS-EXT-STS:task_state": 
null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000002d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "vcpus": 2, + "ram": 4096, + "disk": 0, + "ephemeral": 0, + "swap": 0, + "original_name": "m1.small", + "extra_specs": { + "aggregate_instance_extra_specs:general": "true", + "hw:mem_page_size": "large", + "hw:vif_multiqueue_enabled": "true" + } + }, + "id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "merp-project2", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": { + "env": "prod" } - ], - "OS-SRV-USG:terminated_at": null, - "OS-EXT-AZ:availability_zone": "nova", - "user_id": "9349aff8be7545ac9d2f1d00999a23cd", - "name": "merp", - "created": "2014-09-25T13:04:41Z", - "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", - "OS-DCF:diskConfig": "MANUAL", - "os-extended-volumes:volumes_attached": [], - "accessIPv4": "", - "accessIPv6": "", - "progress": 0, - "OS-EXT-STS:power_state": 1, - "config_drive": "", - "metadata": { - "env": "prod" } - } ] } ` @@ -554,35 +643,82 @@ func (m *SDMock) HandleServerListSuccessfully() { const listOutput = ` { - "floating_ips": [ - { - "fixed_ip": null, - "id": "1", - "instance_id": null, - "ip": "10.10.10.1", - "pool": "nova" - }, - { - "fixed_ip": "10.0.0.32", - "id": "2", - "instance_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "ip": "10.10.10.2", - "pool": "nova" - }, - { - "fixed_ip": "10.0.0.34", - "id": "3", - "instance_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", - "ip": "10.10.10.4", 
- "pool": "nova" - } - ] + "floatingips": [ + { + "id": "03a77860-ae03-46c4-b502-caea11467a79", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "10.10.10.1", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "d5597901-48c8-4a69-a041-cfc5be158a04", + "fixed_ip_address": null, + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-08-30T16:30:27Z", + "updated_at": "2023-08-30T16:30:28Z" + }, + { + "id": "03e28c79-5a4c-491e-a4fe-3ff6bba830c6", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "10.10.10.2", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "4a45b012-0478-484d-8cf3-c8abdb194d08", + "fixed_ip_address": "10.0.0.32", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2023-09-06T15:45:36Z", + "updated_at": "2023-09-06T15:45:36Z" + }, + { + "id": "087fcdd2-1d13-4f72-9c0e-c759e796d558", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "floating_ip_address": "10.10.10.4", + "floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2", + "port_id": "a0e244e8-7910-4427-b8d1-20470cad4f8a", + "fixed_ip_address": "10.0.0.34", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z" + }, + { + "id": "b23df91a-a74a-4f75-b252-750aff4a5a0c", + "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", + "floating_ip_address": "10.10.10.24", + "floating_network_id": "b19ff5bc-a49a-46cc-8d14-ca5f1e94791f", + "router_id": "65a5e5af-17f0-4124-9a81-c08b44f5b8a7", + "port_id": 
"b926ab68-ec54-46d8-8c50-1c07aafd5ae9", + "fixed_ip_address": "10.0.0.34", + "status": "ACTIVE", + "description": "", + "dns_domain": "", + "dns_name": "", + "port_forwardings": [], + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z" + } + ] } ` // HandleFloatingIPListSuccessfully mocks floating ips call. func (m *SDMock) HandleFloatingIPListSuccessfully() { - m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { testMethod(m.t, r, http.MethodGet) testHeader(m.t, r, "X-Auth-Token", tokenID) @@ -590,3 +726,140 @@ func (m *SDMock) HandleFloatingIPListSuccessfully() { fmt.Fprint(w, listOutput) }) } + +const portsListBody = ` +{ + "ports": [ + { + "id": "d5597901-48c8-4a69-a041-cfc5be158a04", + "name": "", + "network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "mac_address": "", + "admin_state_up": true, + "status": "DOWN", + "device_id": "", + "device_owner": "", + "fixed_ips": [], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2023-08-30T16:30:27Z", + "updated_at": "2023-08-30T16:30:28Z", + "revision_number": 0, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "4a45b012-0478-484d-8cf3-c8abdb194d08", + "name": "ovn-lb-vip-0980c8de-58c3-481d-89e3-ed81f44286c0", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "mac_address": "fa:16:3e:23:12:a3", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "device_owner": "", + "fixed_ips": [ + { + "subnet_id": "", + "ip_address": "10.10.10.2" + } + ], + "allowed_address_pairs": [], 
+ "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2023-09-06T15:45:36Z", + "updated_at": "2023-09-06T15:45:36Z", + "revision_number": 0, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "a0e244e8-7910-4427-b8d1-20470cad4f8a", + "name": "ovn-lb-vip-26c0ccb1-3036-4345-99e8-d8f34a8ba6b2", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "mac_address": "fa:16:3e:5f:43:10", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + "device_owner": "", + "fixed_ips": [ + { + "subnet_id": "", + "ip_address": "10.10.10.4" + } + ], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z", + "revision_number": 0, + "project_id": "fcad67a6189847c4aecfa3c81a05783b" + }, + { + "id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9", + "name": "dummy-port", + "network_id": "03200a39-b399-44f3-a778-6dbb93343a31", + "tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a", + "mac_address": "fa:16:3e:5f:12:10", + "admin_state_up": true, + "status": "ACTIVE", + "device_id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f", + "device_owner": "", + "fixed_ips": [ + { + "subnet_id": "", + "ip_address": "10.10.10.24" + } + ], + "allowed_address_pairs": [], + "extra_dhcp_opts": [], + "security_groups": [], + "description": "", + "binding:vnic_type": "normal", + "port_security_enabled": true, + "dns_name": "", + "dns_assignment": [], + "dns_domain": "", + "tags": [], + "created_at": "2024-01-24T13:30:50Z", + "updated_at": "2024-01-24T13:30:51Z", + 
"revision_number": 0, + "project_id": "b78fef2305934dbbbeb9a10b4c326f7a" + } + ] +} +` + +// HandlePortsListSuccessfully mocks the ports list API. +func (m *SDMock) HandlePortsListSuccessfully() { + m.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + testMethod(m.t, r, http.MethodGet) + testHeader(m.t, r, "X-Auth-Token", tokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, portsListBody) + }) +} From 1d49d11786017a0e5c2bea3bf35f5d492b366820 Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Tue, 21 Jan 2025 11:18:34 +0000 Subject: [PATCH 1282/1397] fix: fix testing Signed-off-by: Paulo Dias --- discovery/openstack/loadbalancer.go | 2 +- discovery/openstack/loadbalancer_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go index b3c05fada5..469fe84275 100644 --- a/discovery/openstack/loadbalancer.go +++ b/discovery/openstack/loadbalancer.go @@ -117,7 +117,7 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro } } - // Fetch all floating IPs with pagination + // Fetch all floating IPs fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages() if err != nil { return nil, fmt.Errorf("failed to list all fips: %w", err) diff --git a/discovery/openstack/loadbalancer_test.go b/discovery/openstack/loadbalancer_test.go index c0719437d7..eee21b9831 100644 --- a/discovery/openstack/loadbalancer_test.go +++ b/discovery/openstack/loadbalancer_test.go @@ -32,7 +32,7 @@ func (s *OpenstackSDLoadBalancerTestSuite) SetupTest(t *testing.T) { s.Mock.HandleLoadBalancerListSuccessfully() s.Mock.HandleListenersListSuccessfully() - s.Mock.HandleFloatingIPsListSuccessfully() + s.Mock.HandleFloatingIPListSuccessfully() s.Mock.HandleVersionsSuccessfully() s.Mock.HandleAuthSuccessfully() From 803b1565a5d5ba8d6491840a1cc703e1900f9964 Mon Sep 17 00:00:00 2001 From: Paulo Dias Date: Tue, 21 Jan 
2025 11:20:15 +0000 Subject: [PATCH 1283/1397] fix: fix network endpoint id Signed-off-by: Paulo Dias --- discovery/openstack/mock_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index 864097568a..36620defeb 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -147,7 +147,7 @@ func (m *SDMock) HandleAuthSuccessfully() { "url": "%s" } ], - "id": "c609fc430175452290b62a4242e8a7e8", + "id": "589f3d99a3d94f5f871e9f5cf206d2e8", "type": "network" }, { @@ -725,7 +725,7 @@ const listOutput = ` "created_at": "2024-01-24T13:30:50Z", "updated_at": "2024-01-24T13:30:51Z" }, - { + { "id": "fea7332d-9027-4cf9-bf62-c3c4c6ebaf84", "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", "floating_ip_address": "192.168.1.2", From de0060803654cf7882add9ba6b5b5c4f286b2d7e Mon Sep 17 00:00:00 2001 From: bwplotka Date: Tue, 21 Jan 2025 18:09:51 +0000 Subject: [PATCH 1284/1397] Upgrade client_golang to 1.21.0-rc.0 This tests 1.21.0-rc.0 as per https://github.com/prometheus/client_golang/blob/main/RELEASE.md We could merge it too as an early adopter. 
Signed-off-by: bwplotka --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 3eaca8f35d..edac2e9d55 100644 --- a/go.mod +++ b/go.mod @@ -51,9 +51,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_golang v1.21.0-rc.0 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.61.0 + github.com/prometheus/common v0.62.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/exporter-toolkit v0.13.2 github.com/prometheus/sigv4 v0.1.1 diff --git a/go.sum b/go.sum index d26f4079c5..0d393c0dae 100644 --- a/go.sum +++ b/go.sum @@ -436,8 +436,8 @@ github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwv github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.21.0-rc.0 h1:bR+RxBlwcr4q8hXkgSOA/J18j6n0/qH0Gb0DH+8c+RY= +github.com/prometheus/client_golang v1.21.0-rc.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -446,8 +446,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= -github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= From dd5ab743ea153db1e0fe7ec9e372a596bb451d5b Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 5 Dec 2024 06:47:15 +0100 Subject: [PATCH 1285/1397] chore(deps): use version.PrometheusUserAgent Signed-off-by: Matthieu MOREL --- cmd/promtool/main.go | 2 +- discovery/azure/azure.go | 22 +++++++++------------- discovery/digitalocean/digitalocean.go | 2 +- discovery/eureka/client.go | 2 +- discovery/hetzner/robot.go | 2 +- discovery/http/http.go | 2 +- discovery/ionos/server.go | 3 +-- discovery/kubernetes/kubernetes.go | 14 +++++--------- discovery/linode/linode.go | 2 +- discovery/moby/docker.go | 3 ++- discovery/moby/dockerswarm.go | 4 +--- discovery/puppetdb/puppetdb.go | 2 +- discovery/scaleway/baremetal.go | 2 +- discovery/scaleway/instance.go | 2 +- discovery/vultr/vultr.go | 2 +- discovery/xds/client.go | 2 +- notifier/notifier.go | 2 +- scrape/scrape.go | 2 +- storage/remote/client.go | 2 +- 19 files 
changed, 32 insertions(+), 42 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 81ba93d2de..4275b7b572 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -328,7 +328,7 @@ func main() { kingpin.Fatalf("Failed to load HTTP config file: %v", err) } - httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", promconfig.WithUserAgent("promtool/"+version.Version)) + httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", promconfig.WithUserAgent(version.ComponentUserAgent("promtool"))) if err != nil { kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err) } diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index ec1c51ace9..862d86859b 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -70,18 +70,14 @@ const ( authMethodManagedIdentity = "ManagedIdentity" ) -var ( - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - - // DefaultSDConfig is the default Azure SD configuration. - DefaultSDConfig = SDConfig{ - Port: 80, - RefreshInterval: model.Duration(5 * time.Minute), - Environment: "AzurePublicCloud", - AuthenticationMethod: authMethodOAuth, - HTTPClientConfig: config_util.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Azure SD configuration. 
+var DefaultSDConfig = SDConfig{ + Port: 80, + RefreshInterval: model.Duration(5 * time.Minute), + Environment: "AzurePublicCloud", + AuthenticationMethod: authMethodOAuth, + HTTPClientConfig: config_util.DefaultHTTPClientConfig, +} var environments = map[string]cloud.Configuration{ "AZURECHINACLOUD": cloud.AzureChina, @@ -244,7 +240,7 @@ func (d *Discovery) createAzureClient() (client, error) { c.logger = d.logger telemetry := policy.TelemetryOptions{ - ApplicationID: userAgent, + ApplicationID: version.PrometheusUserAgent(), } credential, err := newCredential(*d.cfg, policy.ClientOptions{ diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index fce8d1a354..eeaedd8869 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -132,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }, - godo.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), + godo.SetUserAgent(version.PrometheusUserAgent()), ) if err != nil { return nil, fmt.Errorf("error setting up digital ocean agent: %w", err) diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index 52e8ce7b48..e4b54faae6 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/common/version" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() type Applications struct { VersionsDelta int `xml:"versions__delta"` diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 958f8f710f..33aa2abcd8 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -39,7 +39,7 @@ const ( hetznerLabelRobotCancelled = hetznerRobotLabelPrefix + "cancelled" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() // Discovery 
periodically performs Hetzner Robot requests. It implements // the Discoverer interface. diff --git a/discovery/http/http.go b/discovery/http/http.go index 667fd36f6c..ebc1c31f61 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -44,7 +44,7 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, RefreshInterval: model.Duration(60 * time.Second), } - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) + userAgent = version.PrometheusUserAgent() matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) ) diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index 18e89b1d43..81bb497277 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -15,7 +15,6 @@ package ionos import ( "context" - "fmt" "log/slog" "net" "net/http" @@ -77,7 +76,7 @@ func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error Transport: rt, Timeout: time.Duration(conf.RefreshInterval), } - cfg.UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) + cfg.UserAgent = version.PrometheusUserAgent() d.client = ionoscloud.NewAPIClient(cfg) diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 9aff89e4fa..c6f0e445da 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -59,14 +59,10 @@ const ( presentValue = model.LabelValue("true") ) -var ( - // Http header. - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // DefaultSDConfig is the default Kubernetes SD configuration. - DefaultSDConfig = SDConfig{ - HTTPClientConfig: config.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Kubernetes SD configuration. 
+var DefaultSDConfig = SDConfig{ + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) @@ -336,7 +332,7 @@ func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (* } } - kcfg.UserAgent = userAgent + kcfg.UserAgent = version.PrometheusUserAgent() kcfg.ContentType = "application/vnd.kubernetes.protobuf" c, err := kubernetes.NewForConfig(kcfg) diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 90a91fc920..8e6fd8bf00 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -165,7 +165,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove Timeout: time.Duration(conf.RefreshInterval), }, ) - client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)) + client.SetUserAgent(version.PrometheusUserAgent()) d.client = &client d.Discovery = refresh.NewDiscovery( diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 13cf20d6dd..de277a58db 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -33,6 +33,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/version" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" @@ -173,7 +174,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics disco }), client.WithScheme(hostURL.Scheme), client.WithHTTPHeaders(map[string]string{ - "User-Agent": userAgent, + "User-Agent": version.PrometheusUserAgent(), }), ) } diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index ba42253412..ae12116301 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -38,8 +38,6 @@ const ( swarmLabel = model.MetaLabelPrefix + "dockerswarm_" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // DefaultDockerSwarmSDConfig is 
the default Docker Swarm SD configuration. var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{ RefreshInterval: model.Duration(60 * time.Second), @@ -169,7 +167,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discov }), client.WithScheme(hostURL.Scheme), client.WithHTTPHeaders(map[string]string{ - "User-Agent": userAgent, + "User-Agent": version.PrometheusUserAgent(), }), ) } diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 561bf78ba7..b71842ff52 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -64,7 +64,7 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) + userAgent = version.PrometheusUserAgent() ) func init() { diff --git a/discovery/scaleway/baremetal.go b/discovery/scaleway/baremetal.go index c313e6695d..06f13532df 100644 --- a/discovery/scaleway/baremetal.go +++ b/discovery/scaleway/baremetal.go @@ -93,7 +93,7 @@ func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) { Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), - scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), + scw.WithUserAgent(version.PrometheusUserAgent()), scw.WithProfile(profile), ) if err != nil { diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index 2542c63253..2c192a2bac 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -104,7 +104,7 @@ func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) { Transport: rt, Timeout: time.Duration(conf.RefreshInterval), }), - scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), + scw.WithUserAgent(version.PrometheusUserAgent()), scw.WithProfile(profile), ) if err != nil { diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 
ee92f01699..3e9f68864a 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -135,7 +135,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove Timeout: time.Duration(conf.RefreshInterval), }) - d.client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)) + d.client.SetUserAgent(version.PrometheusUserAgent()) if err != nil { return nil, fmt.Errorf("error setting up vultr agent: %w", err) diff --git a/discovery/xds/client.go b/discovery/xds/client.go index 027ceb2715..a27e060fbd 100644 --- a/discovery/xds/client.go +++ b/discovery/xds/client.go @@ -30,7 +30,7 @@ import ( "github.com/prometheus/common/version" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() // ResourceClient exposes the xDS protocol for a single resource type. // See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#rest-json-polling-subscriptions . diff --git a/notifier/notifier.go b/notifier/notifier.go index 956fd4652a..fbc37c29ef 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -56,7 +56,7 @@ const ( alertmanagerLabel = "alertmanager" ) -var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var userAgent = version.PrometheusUserAgent() // Alert is a generic representation of an alert in the Prometheus eco-system. 
type Alert struct { diff --git a/scrape/scrape.go b/scrape/scrape.go index 85eb07a1cc..92c44e79cb 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -792,7 +792,7 @@ func acceptEncodingHeader(enableCompression bool) string { return "identity" } -var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var UserAgent = version.PrometheusUserAgent() func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { if s.req == nil { diff --git a/storage/remote/client.go b/storage/remote/client.go index 2538ee90a0..aadf15307c 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -66,7 +66,7 @@ const ( var ( // UserAgent represents Prometheus version to use for user agent header. - UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) + UserAgent = version.PrometheusUserAgent() remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{ config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec. From 2033713757e453fd4fd50764e3aec0a361b98bb8 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 22 Jan 2025 14:04:14 +0100 Subject: [PATCH 1286/1397] docs: Improve documentation of promql-delayed-name-removal flag This fixes a formatting problem (`__name__`) was rendered in boldface without the underscores in the headline). Furthermore, it explains the possible issues with the feature flag (change of behavior of certain "weird" queries, problems when disecting a query manually or in PromLens). 
Signed-off-by: beorn7 --- docs/feature_flags.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 0de3d2bf7d..2a5a9263f5 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -131,7 +131,7 @@ Note that during this delay, the Head continues its usual operations, which incl Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place. -## Delay __name__ label removal for PromQL engine +## Delay `__name__` label removal for PromQL engine `--enable-feature=promql-delayed-name-removal` @@ -139,6 +139,18 @@ When enabled, Prometheus will change the way in which the `__name__` label is re This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label. +Note that evaluating parts of the query separately will still trigger the +labelset collision. This commonly happens when analyzing intermediate results +of a query manually or with a tool like PromLens. + +If a query refers to the already removed `__name__` label, its behavior may +change while this feature flag is set. (Example: `sum by (__name__) +(rate({foo="bar"}[5m]))`, see [details on +GitHub](https://github.com/prometheus/prometheus/issues/11397#issuecomment-1451998792).) +These queries are rare to occur and easy to fix. (In the above example, +removing `by (__name__)` doesn't change anything without the feature flag and +fixes the possible problem with the feature flag.) 
+ ## Auto Reload Config `--enable-feature=auto-reload-config` From 665d1ba0cc62a6c3bb58274e91a1a62082298100 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Thu, 23 Jan 2025 00:35:43 +1100 Subject: [PATCH 1287/1397] Ensure metric name is present on histogram_quantile annotations (#15828) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure metric name is present on histogram_quantile annotations Previously the series __name__ label was dropped before the annotation was emitted causing the annotation message to have a blank value. This fix also allows delayed name removal for classic histograms. This also adds a test for malformed le label This annotation could be split into two to be clearer for the user (one for missing, and one for malformed). This is outside of the scope of this change. Fixes: #15411 --------- Signed-off-by: Joshua Hesketh Signed-off-by: Björn Rabenstein Co-authored-by: Björn Rabenstein --- promql/engine_test.go | 75 +++++++++++++++++++++++++++++++++++++++++++ promql/functions.go | 18 +++++++---- promql/quantile.go | 1 - 3 files changed, 87 insertions(+), 7 deletions(-) diff --git a/promql/engine_test.go b/promql/engine_test.go index 20d78a7d86..697e76116e 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3482,6 +3482,81 @@ func TestRateAnnotations(t *testing.T) { } } +func TestHistogramQuantileAnnotations(t *testing.T) { + testCases := map[string]struct { + data string + expr string + expectedWarningAnnotations []string + expectedInfoAnnotations []string + }{ + "info annotation for nonmonotonic buckets": { + data: ` + nonmonotonic_bucket{le="0.1"} 0+2x10 + nonmonotonic_bucket{le="1"} 0+1x10 + nonmonotonic_bucket{le="10"} 0+5x10 + nonmonotonic_bucket{le="100"} 0+4x10 + nonmonotonic_bucket{le="1000"} 0+9x10 + nonmonotonic_bucket{le="+Inf"} 0+8x10 + `, + expr: "histogram_quantile(0.5, nonmonotonic_bucket)", + expectedWarningAnnotations: []string{}, + expectedInfoAnnotations: []string{ 
+ `PromQL info: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name "nonmonotonic_bucket" (1:25)`, + }, + }, + "warning annotation for missing le label": { + data: ` + myHistogram{abe="0.1"} 0+2x10 + `, + expr: "histogram_quantile(0.5, myHistogram)", + expectedWarningAnnotations: []string{ + `PromQL warning: bucket label "le" is missing or has a malformed value of "" for metric name "myHistogram" (1:25)`, + }, + expectedInfoAnnotations: []string{}, + }, + "warning annotation for malformed le label": { + data: ` + myHistogram{le="Hello World"} 0+2x10 + `, + expr: "histogram_quantile(0.5, myHistogram)", + expectedWarningAnnotations: []string{ + `PromQL warning: bucket label "le" is missing or has a malformed value of "Hello World" for metric name "myHistogram" (1:25)`, + }, + expectedInfoAnnotations: []string{}, + }, + "warning annotation for mixed histograms": { + data: ` + mixedHistogram{le="0.1"} 0+2x10 + mixedHistogram{le="1"} 0+3x10 + mixedHistogram{} {{schema:0 count:10 sum:50 buckets:[1 2 3]}} + `, + expr: "histogram_quantile(0.5, mixedHistogram)", + expectedWarningAnnotations: []string{ + `PromQL warning: vector contains a mix of classic and native histograms for metric name "mixedHistogram" (1:25)`, + }, + expectedInfoAnnotations: []string{}, + }, + } + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data)) + t.Cleanup(func() { _ = store.Close() }) + + engine := newTestEngine(t) + query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute)) + require.NoError(t, err) + t.Cleanup(query.Close) + + res := query.Exec(context.Background()) + require.NoError(t, res.Err) + + warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0) + testutil.RequireEqual(t, 
testCase.expectedWarningAnnotations, warnings) + testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos) + }) + } +} + func TestHistogramRateWithFloatStaleness(t *testing.T) { // Make a chunk with two normal histograms of the same value. h1 := histogram.Histogram{ diff --git a/promql/functions.go b/promql/functions.go index 2d809571d4..605661e5a0 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1387,14 +1387,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). Labels() - mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) } - // Now deal with the histograms. + // Now deal with the native histograms. for _, sample := range histogramSamples { // We have to reconstruct the exact same signature as above for // a classic histogram, just ignoring any le label. @@ -1418,16 +1417,23 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev }) } + // Now do classic histograms that have already been filtered for conflicting native histograms. 
for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets) - enh.Out = append(enh.Out, Sample{ - Metric: mb.metric, - F: res, - }) if forcedMonotonicity { annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange())) } + + if !enh.enableDelayedNameRemoval { + mb.metric = mb.metric.DropMetricName() + } + + enh.Out = append(enh.Out, Sample{ + Metric: mb.metric, + F: res, + DropName: true, + }) } } diff --git a/promql/quantile.go b/promql/quantile.go index a6851810c5..f3af82487c 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -47,7 +47,6 @@ const smallDeltaTolerance = 1e-12 // excludedLabels are the labels to exclude from signature calculation for // quantiles. var excludedLabels = []string{ - labels.MetricName, labels.BucketLabel, } From 940016e00265748fba2f5f843161d726fe9823af Mon Sep 17 00:00:00 2001 From: Linas Medziunas Date: Thu, 23 Jan 2025 08:49:47 +0200 Subject: [PATCH 1288/1397] promql: use histogram stats decoder for histogram_avg Signed-off-by: Linas Medziunas --- promql/engine.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index bbd8410268..4e765c434a 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3721,14 +3721,15 @@ func detectHistogramStatsDecoding(expr parser.Expr) { if !ok { continue } - if call.Func.Name == "histogram_count" || call.Func.Name == "histogram_sum" { + switch call.Func.Name { + case "histogram_count", "histogram_sum", "histogram_avg": n.SkipHistogramBuckets = true - break - } - if call.Func.Name == "histogram_quantile" || call.Func.Name == "histogram_fraction" { + case "histogram_quantile", "histogram_fraction": n.SkipHistogramBuckets = false - break + default: + continue } + break } return errors.New("stop") }) From 7263dfe50ec3ba794e6e5319d3fc9f36d752b558 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Thu, 
23 Jan 2025 08:53:59 +0000 Subject: [PATCH 1289/1397] Fixed relabelling; allowing UTF-8 in targetLabel. Signed-off-by: bwplotka --- model/relabel/relabel.go | 14 ++- model/relabel/relabel_test.go | 162 +++++++++++++++++++++++++++------- 2 files changed, 142 insertions(+), 34 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 2bec6cfabb..1425141248 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -29,7 +29,8 @@ import ( ) var ( - relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + relabelTargetLegacy = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + relabelTargetUTF8 = regexp.MustCompile(`^(?:(?:[^\${}]|\$(?:\{[^\${}]+\}|[^\${}]+))+[^\${}]*)+$`) // All UTF-8 chars, but ${}. DefaultRelabelConfig = Config{ Action: Replace, @@ -124,10 +125,15 @@ func (c *Config) Validate() error { if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() { + if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !model.LabelName(c.TargetLabel).IsValid() { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } - if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) { + + relabelTargetRe := relabelTargetUTF8 + if model.NameValidationScheme == model.LegacyValidation { + relabelTargetRe = relabelTargetLegacy + } + if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !relabelTargetRe.MatchString(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase || 
c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() { @@ -136,7 +142,7 @@ func (c *Config) Validate() error { if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { return fmt.Errorf("'replacement' can not be set for %s action", c.Action) } - if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { + if c.Action == LabelMap && !relabelTargetRe.MatchString(c.Replacement) { return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) } if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() { diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 6f234675c6..fbf8f05e07 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -593,6 +593,75 @@ func TestRelabel(t *testing.T) { "d": "match", }), }, + { // Replace on source label with UTF-8 characters. + input: labels.FromMap(map[string]string{ + "utf-8.label": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"utf-8.label"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "d", + Separator: ";", + Replacement: `match${1}`, + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "utf-8.label": "line1\nline2", + "b": "bar", + "c": "baz", + "d": "match", + }), + }, + { // Replace targetLabel with UTF-8 characters. 
+ input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "utf-8.label", + Separator: ";", + Replacement: `match${1}`, + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "utf-8.label": "match", + }), + }, + { // Replace targetLabel with UTF-8 characters and $variable. + input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("line1.*line2"), + TargetLabel: "utf-8.label${1}", + Separator: ";", + Replacement: `match${1}`, + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "utf-8.label": "match", + }), + }, } for _, test := range tests { @@ -646,9 +715,8 @@ func TestRelabelValidate(t *testing.T) { config: Config{ Action: Lowercase, Replacement: DefaultRelabelConfig.Replacement, - TargetLabel: "${3}", + TargetLabel: "${3}", // With UTF-8 naming, this is now a legal relabel rule. }, - expected: `"${3}" is invalid 'target_label'`, }, { config: Config{ @@ -665,9 +733,8 @@ func TestRelabelValidate(t *testing.T) { Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), Action: Replace, Replacement: "${1}", - TargetLabel: "0${3}", + TargetLabel: "0${3}", // With UTF-8 naming this targets a valid label. }, - expected: `"0${3}" is invalid 'target_label'`, }, { config: Config{ @@ -675,9 +742,8 @@ func TestRelabelValidate(t *testing.T) { Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), Action: Replace, Replacement: "${1}", - TargetLabel: "-${3}", + TargetLabel: "-${3}", // With UTF-8 naming this targets a valid label. 
}, - expected: `"-${3}" is invalid 'target_label' for replace action`, }, } for i, test := range tests { @@ -693,30 +759,66 @@ func TestRelabelValidate(t *testing.T) { } func TestTargetLabelValidity(t *testing.T) { - tests := []struct { - str string - valid bool - }{ - {"-label", false}, - {"label", true}, - {"label${1}", true}, - {"${1}label", true}, - {"${1}", true}, - {"${1}label", true}, - {"${", false}, - {"$", false}, - {"${}", false}, - {"foo${", false}, - {"$1", true}, - {"asd$2asd", true}, - {"-foo${1}bar-", false}, - {"_${1}_", true}, - {"foo${bar}foo", true}, - } - for _, test := range tests { - require.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)), - "Expected %q to be %v", test.str, test.valid) - } + t.Run("legacy", func(t *testing.T) { + for _, test := range []struct { + str string + valid bool + }{ + {"-label", false}, + {"label", true}, + {"label${1}", true}, + {"${1}label", true}, + {"${1}", true}, + {"${1}label", true}, + {"${", false}, + {"$", false}, + {"${}", false}, + {"foo${", false}, + {"$1", true}, + {"asd$2asd", true}, + {"-foo${1}bar-", false}, + {"bar.foo${1}bar", false}, + {"_${1}_", true}, + {"foo${bar}foo", true}, + } { + t.Run(test.str, func(t *testing.T) { + require.Equal(t, test.valid, relabelTargetLegacy.Match([]byte(test.str)), + "Expected %q to be %v", test.str, test.valid) + }) + } + }) + t.Run("utf-8", func(t *testing.T) { + for _, test := range []struct { + str string + valid bool + }{ + {"-label", true}, + {"label", true}, + {"label${1}", true}, + {"${1}label", true}, + {"${1}", true}, + {"${1}label", true}, + {"$1", true}, + {"asd$2asd", true}, + {"-foo${1}bar-", true}, + {"bar.foo${1}bar", true}, + {"_${1}_", true}, + {"foo${bar}foo", true}, + + // Those can be ambiguous. Currently, we assume user error. 
+ {"${", false}, + {"$", false}, + {"${}", false}, + {"foo${", false}, + } { + t.Run(test.str, func(t *testing.T) { + require.Equal(t, test.valid, relabelTargetUTF8.Match([]byte(test.str)), + "Expected %q to be %v", test.str, test.valid) + }) + + } + }) + } func BenchmarkRelabel(b *testing.B) { From 80d702afdc08aa453aa96770188bf57bf670be6f Mon Sep 17 00:00:00 2001 From: bwplotka Date: Thu, 23 Jan 2025 09:20:16 +0000 Subject: [PATCH 1290/1397] Fixed rulefmt UTF-8 expectations. Signed-off-by: bwplotka --- model/relabel/relabel_test.go | 2 -- model/rulefmt/rulefmt.go | 8 +++++ model/rulefmt/rulefmt_test.go | 29 +++++++++---------- ...on.bad.yaml => utf-8_annotation.good.yaml} | 0 ...d_lname.bad.yaml => utf-8_lname.good.yaml} | 0 5 files changed, 21 insertions(+), 18 deletions(-) rename model/rulefmt/testdata/{bad_annotation.bad.yaml => utf-8_annotation.good.yaml} (100%) rename model/rulefmt/testdata/{bad_lname.bad.yaml => utf-8_lname.good.yaml} (100%) diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index fbf8f05e07..c5064540f7 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -815,10 +815,8 @@ func TestTargetLabelValidity(t *testing.T) { require.Equal(t, test.valid, relabelTargetUTF8.Match([]byte(test.str)), "Expected %q to be %v", test.str, test.valid) }) - } }) - } func BenchmarkRelabel(b *testing.B) { diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 76b12f0e5c..4769e6d5ba 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -233,6 +233,14 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { node: &r.Record, }) } + // While record is a valid UTF-8 it's common mistake to put PromQL expression in the record name. + // Disallow "{}" chars. 
+ if strings.Contains(r.Record.Value, "{") || strings.Contains(r.Record.Value, "}") { + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("potential issue in the recording rule name; should it be in expr?: %s", r.Record.Value), + node: &r.Record, + }) + } } for k, v := range r.Labels { diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index 286e760a41..f24b02f7b3 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -26,10 +26,15 @@ import ( func TestParseFileSuccess(t *testing.T) { _, errs := ParseFile("testdata/test.yaml", false) require.Empty(t, errs, "unexpected errors parsing file") + + _, errs = ParseFile("testdata/utf-8_lname.good.yaml", false) + require.Empty(t, errs, "unexpected errors parsing file") + _, errs = ParseFile("testdata/utf-8_annotation.good.yaml", false) + require.Empty(t, errs, "unexpected errors parsing file") } func TestParseFileFailure(t *testing.T) { - table := []struct { + for _, c := range []struct { filename string errMsg string }{ @@ -53,17 +58,9 @@ func TestParseFileFailure(t *testing.T) { filename: "noexpr.bad.yaml", errMsg: "field 'expr' must be set in rule", }, - { - filename: "bad_lname.bad.yaml", - errMsg: "invalid label name", - }, - { - filename: "bad_annotation.bad.yaml", - errMsg: "invalid annotation name", - }, { filename: "invalid_record_name.bad.yaml", - errMsg: "invalid recording rule name", + errMsg: "potential issue in the recording rule name; should it be in expr?: strawberry{flavor=\"sweet\"}", }, { filename: "bad_field.bad.yaml", @@ -81,12 +78,12 @@ func TestParseFileFailure(t *testing.T) { filename: "record_and_keep_firing_for.bad.yaml", errMsg: "invalid field 'keep_firing_for' in recording rule", }, - } - - for _, c := range table { - _, errs := ParseFile(filepath.Join("testdata", c.filename), false) - require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) - require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", 
c.filename) + } { + t.Run(c.filename, func(t *testing.T) { + _, errs := ParseFile(filepath.Join("testdata", c.filename), false) + require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename) + require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) + }) } } diff --git a/model/rulefmt/testdata/bad_annotation.bad.yaml b/model/rulefmt/testdata/utf-8_annotation.good.yaml similarity index 100% rename from model/rulefmt/testdata/bad_annotation.bad.yaml rename to model/rulefmt/testdata/utf-8_annotation.good.yaml diff --git a/model/rulefmt/testdata/bad_lname.bad.yaml b/model/rulefmt/testdata/utf-8_lname.good.yaml similarity index 100% rename from model/rulefmt/testdata/bad_lname.bad.yaml rename to model/rulefmt/testdata/utf-8_lname.good.yaml From 9097f8f4e7bd239371a1f042b22f4e5cef0d9438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?= Date: Thu, 23 Jan 2025 09:43:39 +0100 Subject: [PATCH 1291/1397] test(promql): some functions silently ignore native histograms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Functions that silently ignore native histograms, not covered by existing tests: abs(), ceil(), clamp(), clamp_min(), clamp_max(), floor(), round(). 
Ref: https://github.com/prometheus/prometheus/pull/15845 Signed-off-by: György Krajcsovits --- promql/promqltest/testdata/functions.test | 63 ++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 7fc636450f..fadd29c53c 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -492,6 +492,7 @@ load 5m test_clamp{src="clamp-a"} -50 test_clamp{src="clamp-b"} 0 test_clamp{src="clamp-c"} 100 + test_clamp{src="histogram"} {{schema:0 sum:1 count:1}} eval instant at 0m clamp_max(test_clamp, 75) {src="clamp-a"} -50 @@ -1209,7 +1210,67 @@ eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m]) clear -# Test for absent() +# Test for abs, ceil(), floor(), round(). + +load 5m + data{type="positive"} 2.5 + data{type="whole"} 2 + data{type="negative"} -2.5 + data{type="nwhole"} -2 + data{type="zero"} 0 + data{type="nzero"} -0 + data{type="inf"} +Inf + data{type="ninf"} -Inf + data{type="nan"} NaN + data{type="histogram"} {{schema:0 sum:1 count:1}} + +eval instant at 0m abs(data) + {type="positive"} 2.5 + {type="whole"} 2 + {type="negative"} 2.5 + {type="nwhole"} 2 + {type="zero"} 0 + {type="nzero"} 0 + {type="inf"} +Inf + {type="ninf"} +Inf + {type="nan"} NaN + +eval instant at 0m ceil(data) + {type="positive"} 3 + {type="whole"} 2 + {type="negative"} -2 + {type="nwhole"} -2 + {type="zero"} 0 + {type="nzero"} 0 + {type="inf"} +Inf + {type="ninf"} -Inf + {type="nan"} NaN + +eval instant at 0m floor(data) + {type="positive"} 2 + {type="whole"} 2 + {type="negative"} -3 + {type="nwhole"} -2 + {type="zero"} 0 + {type="nzero"} 0 + {type="inf"} +Inf + {type="ninf"} -Inf + {type="nan"} NaN + +eval instant at 0m round(data) + {type="positive"} 3 + {type="whole"} 2 + {type="negative"} -2 + {type="nwhole"} -2 + {type="zero"} 0 + {type="nzero"} 0 + {type="inf"} +Inf + {type="ninf"} -Inf + {type="nan"} NaN + 
+clear + +# Test for absent(). eval instant at 50m absent(nonexistent) {} 1 From 30112f6ed74fe7d6b88a019524f35fbc26903c0a Mon Sep 17 00:00:00 2001 From: Artur Melanchyk Date: Fri, 24 Jan 2025 14:31:13 +0100 Subject: [PATCH 1292/1397] promtool: optimize labels slice allocation Signed-off-by: Artur Melanchyk --- cmd/promtool/tsdb.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index cefacfec28..b291d29bf7 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -315,12 +315,11 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) { i := 0 for scanner.Scan() && i < n { - m := make([]labels.Label, 0, 10) - r := strings.NewReplacer("\"", "", "{", "", "}", "") s := r.Replace(scanner.Text()) labelChunks := strings.Split(s, ",") + m := make([]labels.Label, 0, len(labelChunks)) for _, labelChunk := range labelChunks { split := strings.Split(labelChunk, ":") m = append(m, labels.Label{Name: split[0], Value: split[1]}) From bd0d9e7a0abea6e44b01e9fe8beb430f5e0053c6 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Fri, 24 Jan 2025 15:49:10 +0000 Subject: [PATCH 1293/1397] Update model/rulefmt/rulefmt.go Co-authored-by: Owen Williams Signed-off-by: Bartlomiej Plotka --- model/rulefmt/rulefmt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 4769e6d5ba..3fc3fa0437 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -237,7 +237,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { // Disallow "{}" chars. 
if strings.Contains(r.Record.Value, "{") || strings.Contains(r.Record.Value, "}") { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("potential issue in the recording rule name; should it be in expr?: %s", r.Record.Value), + err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record.Value), node: &r.Record, }) } From 36cf85fc1e6f1ebe7f41a3b5028843ba4dd167f1 Mon Sep 17 00:00:00 2001 From: bwplotka Date: Mon, 27 Jan 2025 09:49:50 +0000 Subject: [PATCH 1294/1397] Addressed comments. Signed-off-by: bwplotka --- model/relabel/relabel.go | 19 ++++-- model/relabel/relabel_test.go | 118 +++++++++++++++++----------------- model/rulefmt/rulefmt_test.go | 2 +- 3 files changed, 72 insertions(+), 67 deletions(-) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 1425141248..1373484426 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -29,8 +29,8 @@ import ( ) var ( + // relabelTargetLegacy allows targeting labels with legacy Prometheus character set, plus ${} variables for dynamic characters from source the metrics. relabelTargetLegacy = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) - relabelTargetUTF8 = regexp.MustCompile(`^(?:(?:[^\${}]|\$(?:\{[^\${}]+\}|[^\${}]+))+[^\${}]*)+$`) // All UTF-8 chars, but ${}. DefaultRelabelConfig = Config{ Action: Replace, @@ -129,11 +129,18 @@ func (c *Config) Validate() error { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } - relabelTargetRe := relabelTargetUTF8 - if model.NameValidationScheme == model.LegacyValidation { - relabelTargetRe = relabelTargetLegacy + isValidLabelNameWithRegexVarFn := func(value string) bool { + // UTF-8 allows ${} characters, so standard validation allow $variables by default. + // TODO(bwplotka): Relabelling users cannot put $ and ${<...>} characters in metric names or values. + // Design escaping mechanism to allow that, once valid use case appears. 
+ return model.LabelName(value).IsValid() } - if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !relabelTargetRe.MatchString(c.TargetLabel) { + if model.NameValidationScheme == model.LegacyValidation { + isValidLabelNameWithRegexVarFn = func(value string) bool { + return relabelTargetLegacy.MatchString(value) + } + } + if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() { @@ -142,7 +149,7 @@ func (c *Config) Validate() error { if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { return fmt.Errorf("'replacement' can not be set for %s action", c.Action) } - if c.Action == LabelMap && !relabelTargetRe.MatchString(c.Replacement) { + if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) { return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) } if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() { diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index c5064540f7..a8026f275e 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -651,7 +651,7 @@ func TestRelabel(t *testing.T) { Regex: MustNewRegexp("line1.*line2"), TargetLabel: "utf-8.label${1}", Separator: ";", - Replacement: `match${1}`, + Replacement: "match${1}", Action: Replace, }, }, @@ -662,6 +662,38 @@ func TestRelabel(t *testing.T) { "utf-8.label": "match", }), }, + { // Replace targetLabel with UTF-8 characters and various $var styles. 
+ input: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("(line1).*(?line2)"), + TargetLabel: "label1_${1}", + Separator: ";", + Replacement: "val_${second}", + Action: Replace, + }, + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("(line1).*(?line2)"), + TargetLabel: "label2_$1", + Separator: ";", + Replacement: "val_$second", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "line1\nline2", + "b": "bar", + "c": "baz", + "label1_line1": "val_line2", + "label2_line1": "val_line2", + }), + }, } for _, test := range tests { @@ -758,65 +790,31 @@ func TestRelabelValidate(t *testing.T) { } } -func TestTargetLabelValidity(t *testing.T) { - t.Run("legacy", func(t *testing.T) { - for _, test := range []struct { - str string - valid bool - }{ - {"-label", false}, - {"label", true}, - {"label${1}", true}, - {"${1}label", true}, - {"${1}", true}, - {"${1}label", true}, - {"${", false}, - {"$", false}, - {"${}", false}, - {"foo${", false}, - {"$1", true}, - {"asd$2asd", true}, - {"-foo${1}bar-", false}, - {"bar.foo${1}bar", false}, - {"_${1}_", true}, - {"foo${bar}foo", true}, - } { - t.Run(test.str, func(t *testing.T) { - require.Equal(t, test.valid, relabelTargetLegacy.Match([]byte(test.str)), - "Expected %q to be %v", test.str, test.valid) - }) - } - }) - t.Run("utf-8", func(t *testing.T) { - for _, test := range []struct { - str string - valid bool - }{ - {"-label", true}, - {"label", true}, - {"label${1}", true}, - {"${1}label", true}, - {"${1}", true}, - {"${1}label", true}, - {"$1", true}, - {"asd$2asd", true}, - {"-foo${1}bar-", true}, - {"bar.foo${1}bar", true}, - {"_${1}_", true}, - {"foo${bar}foo", true}, - - // Those can be ambiguous. Currently, we assume user error. 
- {"${", false}, - {"$", false}, - {"${}", false}, - {"foo${", false}, - } { - t.Run(test.str, func(t *testing.T) { - require.Equal(t, test.valid, relabelTargetUTF8.Match([]byte(test.str)), - "Expected %q to be %v", test.str, test.valid) - }) - } - }) +func TestTargetLabelLegacyValidity(t *testing.T) { + for _, test := range []struct { + str string + valid bool + }{ + {"-label", false}, + {"label", true}, + {"label${1}", true}, + {"${1}label", true}, + {"${1}", true}, + {"${1}label", true}, + {"${", false}, + {"$", false}, + {"${}", false}, + {"foo${", false}, + {"$1", true}, + {"asd$2asd", true}, + {"-foo${1}bar-", false}, + {"bar.foo${1}bar", false}, + {"_${1}_", true}, + {"foo${bar}foo", true}, + } { + require.Equal(t, test.valid, relabelTargetLegacy.Match([]byte(test.str)), + "Expected %q to be %v", test.str, test.valid) + } } func BenchmarkRelabel(b *testing.B) { diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index f24b02f7b3..9e191820c5 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -60,7 +60,7 @@ func TestParseFileFailure(t *testing.T) { }, { filename: "invalid_record_name.bad.yaml", - errMsg: "potential issue in the recording rule name; should it be in expr?: strawberry{flavor=\"sweet\"}", + errMsg: "braces present in the recording rule name; should it be in expr?: strawberry{flavor=\"sweet\"}", }, { filename: "bad_field.bad.yaml", From e722a3923fdf072abdf0be3c67414b846e926ab7 Mon Sep 17 00:00:00 2001 From: Zhang Zhanpeng Date: Sat, 11 Jan 2025 22:00:39 +0800 Subject: [PATCH 1295/1397] upgrade influxdb client to v2 Signed-off-by: Zhang Zhanpeng --- documentation/examples/remote_storage/go.mod | 5 +- documentation/examples/remote_storage/go.sum | 14 +- .../remote_storage_adapter/README.md | 2 +- .../remote_storage_adapter/influxdb/client.go | 219 ++++++++++-------- .../influxdb/client_test.go | 13 +- .../remote_storage_adapter/main.go | 53 ++--- 6 files changed, 160 insertions(+), 146 
deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 643fbc901c..da8ef55332 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -6,7 +6,7 @@ require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.11.8 + github.com/influxdata/influxdb-client-go/v2 v2.14.0 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.61.0 github.com/prometheus/prometheus v1.99.0 @@ -19,6 +19,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -32,6 +33,7 @@ require ( github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -41,6 +43,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/oapi-codegen/runtime v1.0.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect diff --git 
a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 23ac21c12c..53077c877a 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -14,6 +14,7 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -23,6 +24,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -34,6 +37,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -166,8 +170,10 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/influxdata/influxdb v1.11.8 h1:lX8MJDfk91O7nqzzonQkjk87gOeQy9V/Xp3gpELhG1s= -github.com/influxdata/influxdb v1.11.8/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI= +github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4= +github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -183,6 +189,7 @@ github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -232,6 +239,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= +github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -285,6 +294,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom 
v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= diff --git a/documentation/examples/remote_storage/remote_storage_adapter/README.md b/documentation/examples/remote_storage/remote_storage_adapter/README.md index ce0735bff5..5f37c4d9eb 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/README.md +++ b/documentation/examples/remote_storage/remote_storage_adapter/README.md @@ -31,7 +31,7 @@ OpenTSDB example: InfluxDB example: ``` -./remote_storage_adapter --influxdb-url=http://localhost:8086/ --influxdb.database=prometheus --influxdb.retention-policy=autogen +INFLUXDB_AUTH_TOKEN= ./remote_storage_adapter --influxdb-url=http://localhost:8086/ --influxdb.organization= --influxdb.bucket= ``` To show all flags: diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 6ae40f8173..12dc1732a5 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -14,15 +14,17 @@ package influxdb import ( - "encoding/json" + "context" "errors" "fmt" "log/slog" "math" - "os" "strings" + "time" - influx "github.com/influxdata/influxdb/client/v2" + influx "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api/query" + "github.com/influxdata/influxdb-client-go/v2/api/write" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" @@ -34,36 +36,39 @@ import ( type Client struct { logger *slog.Logger - client influx.Client - 
database string - retentionPolicy string - ignoredSamples prometheus.Counter + client influx.Client + organization string + bucket string + ignoredSamples prometheus.Counter + + context context.Context } // NewClient creates a new Client. -func NewClient(logger *slog.Logger, conf influx.HTTPConfig, db, rp string) *Client { - c, err := influx.NewHTTPClient(conf) - // Currently influx.NewClient() *should* never return an error. - if err != nil { - logger.Error("Error creating influx HTTP client", "err", err) - os.Exit(1) - } +func NewClient(logger *slog.Logger, url, authToken, organization, bucket string) *Client { + c := influx.NewClientWithOptions( + url, + authToken, + influx.DefaultOptions().SetPrecision(time.Millisecond), + ) if logger == nil { logger = promslog.NewNopLogger() } return &Client{ - logger: logger, - client: c, - database: db, - retentionPolicy: rp, + logger: logger, + client: c, + organization: organization, + bucket: bucket, ignoredSamples: prometheus.NewCounter( prometheus.CounterOpts{ Name: "prometheus_influxdb_ignored_samples_total", Help: "The total number of samples not sent to InfluxDB due to unsupported float values (Inf, -Inf, NaN).", }, ), + + context: context.Background(), } } @@ -80,39 +85,41 @@ func tagsFromMetric(m model.Metric) map[string]string { // Write sends a batch of samples to InfluxDB via its HTTP API. 
func (c *Client) Write(samples model.Samples) error { - points := make([]*influx.Point, 0, len(samples)) + points := make([]*write.Point, 0, len(samples)) for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { - c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) + c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) c.ignoredSamples.Inc() continue } - p, err := influx.NewPoint( + p := influx.NewPoint( string(s.Metric[model.MetricNameLabel]), tagsFromMetric(s.Metric), map[string]interface{}{"value": v}, s.Timestamp.Time(), ) - if err != nil { - return err - } points = append(points, p) } - bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{ - Precision: "ms", - Database: c.database, - RetentionPolicy: c.retentionPolicy, - }) - if err != nil { + writeAPI := c.client.WriteAPIBlocking(c.organization, c.bucket) + writeAPI.EnableBatching() // default 5_000 + var err error + for _, p := range points { + if err = writeAPI.WritePoint(c.context, p); err != nil { + return err + } + } + if err = writeAPI.Flush(c.context); err != nil { return err } - bps.AddPoints(points) - return c.client.Write(bps) + + return nil } func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) { + queryAPI := c.client.QueryAPI(c.organization) + labelsToSeries := map[string]*prompb.TimeSeries{} for _, q := range req.Queries { command, err := c.buildCommand(q) @@ -120,17 +127,18 @@ func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) { return nil, err } - query := influx.NewQuery(command, c.database, "ms") - resp, err := c.client.Query(query) + resp, err := queryAPI.Query(c.context, command) if err != nil { return nil, err } - if resp.Err != "" { - return nil, errors.New(resp.Err) + if resp.Err() != nil { + return nil, resp.Err() } - if err = mergeResult(labelsToSeries, resp.Results); err != nil { - return nil, err + for resp.Next() { + if err = 
mergeResult(labelsToSeries, resp.Record()); err != nil { + return nil, err + } } } @@ -146,17 +154,20 @@ func (c *Client) Read(req *prompb.ReadRequest) (*prompb.ReadResponse, error) { } func (c *Client) buildCommand(q *prompb.Query) (string, error) { - matchers := make([]string, 0, len(q.Matchers)) + rangeInNs := fmt.Sprintf("start: time(v: %v), stop: time(v: %v)", q.StartTimestampMs*time.Millisecond.Nanoseconds(), q.EndTimestampMs*time.Millisecond.Nanoseconds()) + // If we don't find a metric name matcher, query all metrics // (InfluxDB measurements) by default. - from := "FROM /.+/" + measurement := `r._measurement` + matchers := make([]string, 0, len(q.Matchers)) + var joinedMatchers string for _, m := range q.Matchers { if m.Name == model.MetricNameLabel { switch m.Type { case prompb.LabelMatcher_EQ: - from = fmt.Sprintf("FROM %q.%q", c.retentionPolicy, m.Value) + measurement += fmt.Sprintf(" == \"%s\"", m.Value) case prompb.LabelMatcher_RE: - from = fmt.Sprintf("FROM %q./^%s$/", c.retentionPolicy, escapeSlashes(m.Value)) + measurement += fmt.Sprintf(" =~ /%s/", escapeSlashes(m.Value)) default: // TODO: Figure out how to support these efficiently. 
return "", errors.New("non-equal or regex-non-equal matchers are not supported on the metric name yet") @@ -166,21 +177,28 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) { switch m.Type { case prompb.LabelMatcher_EQ: - matchers = append(matchers, fmt.Sprintf("%q = '%s'", m.Name, escapeSingleQuotes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s == \"%s\"", m.Name, escapeSingleQuotes(m.Value))) case prompb.LabelMatcher_NEQ: - matchers = append(matchers, fmt.Sprintf("%q != '%s'", m.Name, escapeSingleQuotes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s != \"%s\"", m.Name, escapeSingleQuotes(m.Value))) case prompb.LabelMatcher_RE: - matchers = append(matchers, fmt.Sprintf("%q =~ /^%s$/", m.Name, escapeSlashes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s =~ /%s/", m.Name, escapeSingleQuotes(m.Value))) case prompb.LabelMatcher_NRE: - matchers = append(matchers, fmt.Sprintf("%q !~ /^%s$/", m.Name, escapeSlashes(m.Value))) + matchers = append(matchers, fmt.Sprintf("r.%s !~ /%s/", m.Name, escapeSingleQuotes(m.Value))) default: return "", fmt.Errorf("unknown match type %v", m.Type) } } - matchers = append(matchers, fmt.Sprintf("time >= %vms", q.StartTimestampMs)) - matchers = append(matchers, fmt.Sprintf("time <= %vms", q.EndTimestampMs)) + if len(matchers) > 0 { + joinedMatchers = fmt.Sprintf(" and %s", strings.Join(matchers, " and ")) + } - return fmt.Sprintf("SELECT value %s WHERE %v GROUP BY *", from, strings.Join(matchers, " AND ")), nil + // _measurement must be retained, otherwise "invalid metric name" shall be thrown + command := fmt.Sprintf( + "from(bucket: \"%s\") |> range(%s) |> filter(fn: (r) => %s%s)", + c.bucket, rangeInNs, measurement, joinedMatchers, + ) + + return command, nil } func escapeSingleQuotes(str string) string { @@ -191,44 +209,60 @@ func escapeSlashes(str string) string { return strings.ReplaceAll(str, `/`, `\/`) } -func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results 
[]influx.Result) error { - for _, r := range results { - for _, s := range r.Series { - k := concatLabels(s.Tags) - ts, ok := labelsToSeries[k] - if !ok { - ts = &prompb.TimeSeries{ - Labels: tagsToLabelPairs(s.Name, s.Tags), - } - labelsToSeries[k] = ts - } +func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, record *query.FluxRecord) error { + builtIntime := record.Time() + builtInvalue := record.Value() + builtInMeasurement := record.Measurement() + labels := record.Values() - samples, err := valuesToSamples(s.Values) - if err != nil { - return err - } + filterOutBuiltInLabels(labels) - ts.Samples = mergeSamples(ts.Samples, samples) + k := concatLabels(labels) + + ts, ok := labelsToSeries[k] + if !ok { + ts = &prompb.TimeSeries{ + Labels: tagsToLabelPairs(builtInMeasurement, labels), } + labelsToSeries[k] = ts } + + sample, err := valuesToSamples(builtIntime, builtInvalue) + if err != nil { + return err + } + + ts.Samples = mergeSamples(ts.Samples, []prompb.Sample{sample}) + return nil } -func concatLabels(labels map[string]string) string { +func filterOutBuiltInLabels(labels map[string]interface{}) { + delete(labels, "table") + delete(labels, "_start") + delete(labels, "_stop") + delete(labels, "_time") + delete(labels, "_value") + delete(labels, "_field") + delete(labels, "result") + delete(labels, "_measurement") +} + +func concatLabels(labels map[string]interface{}) string { // 0xff cannot occur in valid UTF-8 sequences, so use it // as a separator here. 
separator := "\xff" pairs := make([]string, 0, len(labels)) for k, v := range labels { - pairs = append(pairs, k+separator+v) + pairs = append(pairs, fmt.Sprintf("%s%s%v", k, separator, v)) } return strings.Join(pairs, separator) } -func tagsToLabelPairs(name string, tags map[string]string) []prompb.Label { +func tagsToLabelPairs(name string, tags map[string]interface{}) []prompb.Label { pairs := make([]prompb.Label, 0, len(tags)) for k, v := range tags { - if v == "" { + if v == nil { // If we select metrics with different sets of labels names, // InfluxDB returns *all* possible tag names on all returned // series, with empty tag values on series where they don't @@ -239,7 +273,7 @@ func tagsToLabelPairs(name string, tags map[string]string) []prompb.Label { } pairs = append(pairs, prompb.Label{ Name: k, - Value: v, + Value: fmt.Sprintf("%v", v), }) } pairs = append(pairs, prompb.Label{ @@ -249,39 +283,22 @@ func tagsToLabelPairs(name string, tags map[string]string) []prompb.Label { return pairs } -func valuesToSamples(values [][]interface{}) ([]prompb.Sample, error) { - samples := make([]prompb.Sample, 0, len(values)) - for _, v := range values { - if len(v) != 2 { - return nil, fmt.Errorf("bad sample tuple length, expected [, ], got %v", v) +func valuesToSamples(timestamp time.Time, value interface{}) (prompb.Sample, error) { + var valueFloat64 float64 + var valueInt64 int64 + var ok bool + if valueFloat64, ok = value.(float64); !ok { + if valueInt64, ok = value.(int64); ok { + valueFloat64 = float64(valueInt64) + } else { + return prompb.Sample{}, fmt.Errorf("unable to convert sample value to float64: %v", value) } - - jsonTimestamp, ok := v[0].(json.Number) - if !ok { - return nil, fmt.Errorf("bad timestamp: %v", v[0]) - } - - jsonValue, ok := v[1].(json.Number) - if !ok { - return nil, fmt.Errorf("bad sample value: %v", v[1]) - } - - timestamp, err := jsonTimestamp.Int64() - if err != nil { - return nil, fmt.Errorf("unable to convert sample timestamp to int64: 
%w", err) - } - - value, err := jsonValue.Float64() - if err != nil { - return nil, fmt.Errorf("unable to convert sample value to float64: %w", err) - } - - samples = append(samples, prompb.Sample{ - Timestamp: timestamp, - Value: value, - }) } - return samples, nil + + return prompb.Sample{ + Timestamp: timestamp.UnixMilli(), + Value: valueFloat64, + }, nil } // mergeSamples merges two lists of sample pairs and removes duplicate diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go index a738c01dcd..3f756a9be4 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go @@ -20,9 +20,7 @@ import ( "net/http/httptest" "net/url" "testing" - "time" - influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) @@ -69,13 +67,14 @@ func TestClient(t *testing.T) { } expectedBody := `testmetric,test_label=test_label_value1 value=1.23 123456789123 + testmetric,test_label=test_label_value2 value=5.1234 123456789123 ` server := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { require.Equal(t, http.MethodPost, r.Method, "Unexpected method.") - require.Equal(t, "/write", r.URL.Path, "Unexpected path.") + require.Equal(t, "/api/v2/write", r.URL.Path, "Unexpected path.") b, err := io.ReadAll(r.Body) require.NoError(t, err, "Error reading body.") require.Equal(t, expectedBody, string(b), "Unexpected request body.") @@ -86,13 +85,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123 serverURL, err := url.Parse(server.URL) require.NoError(t, err, "Unable to parse server URL.") - conf := influx.HTTPConfig{ - Addr: serverURL.String(), - Username: "testuser", - Password: "testpass", - Timeout: time.Minute, - } - c 
:= NewClient(nil, conf, "test_db", "default") + c := NewClient(nil, serverURL.String(), "auth_token", "test_organization", "test_bucket") err = c.Write(samples) require.NoError(t, err, "Error sending samples.") } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index 7f62990d2e..9ea9b8e5f9 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -29,7 +29,6 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/model" @@ -44,19 +43,18 @@ import ( ) type config struct { - graphiteAddress string - graphiteTransport string - graphitePrefix string - opentsdbURL string - influxdbURL string - influxdbRetentionPolicy string - influxdbUsername string - influxdbDatabase string - influxdbPassword string - remoteTimeout time.Duration - listenAddr string - telemetryPath string - promslogConfig promslog.Config + graphiteAddress string + graphiteTransport string + graphitePrefix string + opentsdbURL string + influxdbURL string + bucket string + organization string + influxdbAuthToken string + remoteTimeout time.Duration + listenAddr string + telemetryPath string + promslogConfig promslog.Config } var ( @@ -118,8 +116,8 @@ func parseFlags() *config { a.HelpFlag.Short('h') cfg := &config{ - influxdbPassword: os.Getenv("INFLUXDB_PW"), - promslogConfig: promslog.Config{}, + influxdbAuthToken: os.Getenv("INFLUXDB_AUTH_TOKEN"), + promslogConfig: promslog.Config{}, } a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty."). 
@@ -132,12 +130,10 @@ func parseFlags() *config { Default("").StringVar(&cfg.opentsdbURL) a.Flag("influxdb-url", "The URL of the remote InfluxDB server to send samples to. None, if empty."). Default("").StringVar(&cfg.influxdbURL) - a.Flag("influxdb.retention-policy", "The InfluxDB retention policy to use."). - Default("autogen").StringVar(&cfg.influxdbRetentionPolicy) - a.Flag("influxdb.username", "The username to use when sending samples to InfluxDB. The corresponding password must be provided via the INFLUXDB_PW environment variable."). - Default("").StringVar(&cfg.influxdbUsername) - a.Flag("influxdb.database", "The name of the database to use for storing samples in InfluxDB."). - Default("prometheus").StringVar(&cfg.influxdbDatabase) + a.Flag("influxdb.bucket", "The InfluxDB bucket to use."). + Default("").StringVar(&cfg.bucket) + a.Flag("influxdb.organization", "The name of the organization to use for storing samples in InfluxDB."). + Default("").StringVar(&cfg.organization) a.Flag("send-timeout", "The timeout to use when sending samples to the remote storage."). Default("30s").DurationVar(&cfg.remoteTimeout) a.Flag("web.listen-address", "Address to listen on for web endpoints."). 
@@ -191,17 +187,12 @@ func buildClients(logger *slog.Logger, cfg *config) ([]writer, []reader) { logger.Error("Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err) os.Exit(1) } - conf := influx.HTTPConfig{ - Addr: url.String(), - Username: cfg.influxdbUsername, - Password: cfg.influxdbPassword, - Timeout: cfg.remoteTimeout, - } c := influxdb.NewClient( logger.With("storage", "InfluxDB"), - conf, - cfg.influxdbDatabase, - cfg.influxdbRetentionPolicy, + url.String(), + cfg.influxdbAuthToken, + cfg.organization, + cfg.bucket, ) prometheus.MustRegister(c) writers = append(writers, c) From f4fbe47254856612559153051a87e76b083f6eaf Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 27 Nov 2023 15:13:01 +0000 Subject: [PATCH 1296/1397] tsdb tests: avoid capture-by-reference in goroutines Only one version of the variable is captured; this is a source of race conditions. Signed-off-by: Bryan Boreham --- tsdb/compact_test.go | 9 +++++---- tsdb/head_wal.go | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 310fe8f25a..6e3db15eb4 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1575,12 +1575,13 @@ func TestHeadCompactionWithHistograms(t *testing.T) { func TestSparseHistogramSpaceSavings(t *testing.T) { t.Skip() - cases := []struct { + type testcase struct { numSeriesPerSchema int numBuckets int numSpans int gapBetweenSpans int - }{ + } + cases := []testcase{ {1, 15, 1, 0}, {1, 50, 1, 0}, {1, 100, 1, 0}, @@ -1692,7 +1693,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { }() wg.Add(1) - go func() { + go func(c testcase) { defer wg.Done() // Ingest histograms the old way. 
@@ -1740,7 +1741,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { oldULIDs, err = compactor.Write(oldHead.opts.ChunkDirRoot, oldHead, mint, maxt, nil) require.NoError(t, err) require.Len(t, oldULIDs, 1) - }() + }(c) wg.Wait() diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index e9557c59f6..0afe84a875 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -694,9 +694,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch go func() { defer close(decodedCh) - var err error dec := record.NewDecoder(syms) for r.Next() { + var err error rec := r.Record() switch dec.Type(rec) { case record.Samples: From 2f615a200d8591db573f9993fd2690720193030f Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 27 Nov 2023 15:15:29 +0000 Subject: [PATCH 1297/1397] tsdb tests: restrict some 'defer' operations 'defer' only runs at the end of the function, so introduce some more functions / move the start, so that 'defer' can run at the end of the logical block. Signed-off-by: Bryan Boreham --- tsdb/db_test.go | 284 ++++++++++++++++++++++--------------------- tsdb/querier_test.go | 68 +++++------ 2 files changed, 178 insertions(+), 174 deletions(-) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index b858e6f524..7dc60a7304 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -426,64 +426,65 @@ func TestDeleteSimple(t *testing.T) { }, } -Outer: for _, c := range cases { - db := openTestDB(t, nil, nil) - defer func() { - require.NoError(t, db.Close()) - }() + t.Run("", func(t *testing.T) { + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() - ctx := context.Background() - app := db.Appender(ctx) + ctx := context.Background() + app := db.Appender(ctx) - smpls := make([]float64, numSamples) - for i := int64(0); i < numSamples; i++ { - smpls[i] = rand.Float64() - app.Append(0, labels.FromStrings("a", "b"), i, smpls[i]) - } - - require.NoError(t, app.Commit()) - - // TODO(gouthamve): Reset the tombstones somehow. 
- // Delete the ranges. - for _, r := range c.Intervals { - require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))) - } - - // Compare the result. - q, err := db.Querier(0, numSamples) - require.NoError(t, err) - - res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - - expSamples := make([]chunks.Sample, 0, len(c.remaint)) - for _, ts := range c.remaint { - expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) - } - - expss := newMockSeriesSet([]storage.Series{ - storage.NewListSeries(labels.FromStrings("a", "b"), expSamples), - }) - - for { - eok, rok := expss.Next(), res.Next() - require.Equal(t, eok, rok) - - if !eok { - require.Empty(t, res.Warnings()) - continue Outer + smpls := make([]float64, numSamples) + for i := int64(0); i < numSamples; i++ { + smpls[i] = rand.Float64() + app.Append(0, labels.FromStrings("a", "b"), i, smpls[i]) } - sexp := expss.At() - sres := res.At() - require.Equal(t, sexp.Labels(), sres.Labels()) + require.NoError(t, app.Commit()) - smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil) - smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil) + // TODO(gouthamve): Reset the tombstones somehow. + // Delete the ranges. + for _, r := range c.Intervals { + require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))) + } - require.Equal(t, errExp, errRes) - require.Equal(t, smplExp, smplRes) - } + // Compare the result. 
+ q, err := db.Querier(0, numSamples) + require.NoError(t, err) + + res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + + expSamples := make([]chunks.Sample, 0, len(c.remaint)) + for _, ts := range c.remaint { + expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) + } + + expss := newMockSeriesSet([]storage.Series{ + storage.NewListSeries(labels.FromStrings("a", "b"), expSamples), + }) + + for { + eok, rok := expss.Next(), res.Next() + require.Equal(t, eok, rok) + + if !eok { + require.Empty(t, res.Warnings()) + break + } + sexp := expss.At() + sres := res.At() + + require.Equal(t, sexp.Labels(), sres.Labels()) + + smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil) + smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil) + + require.Equal(t, errExp, errRes) + require.Equal(t, smplExp, smplRes) + } + }) } } @@ -759,64 +760,65 @@ func TestDB_SnapshotWithDelete(t *testing.T) { }, } -Outer: for _, c := range cases { - // TODO(gouthamve): Reset the tombstones somehow. - // Delete the ranges. - for _, r := range c.intervals { - require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))) - } - - // create snapshot - snap := t.TempDir() - - require.NoError(t, db.Snapshot(snap, true)) - - // reopen DB from snapshot - newDB, err := Open(snap, nil, nil, nil, nil) - require.NoError(t, err) - defer func() { require.NoError(t, newDB.Close()) }() - - // Compare the result. 
- q, err := newDB.Querier(0, numSamples) - require.NoError(t, err) - defer func() { require.NoError(t, q.Close()) }() - - res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) - - expSamples := make([]chunks.Sample, 0, len(c.remaint)) - for _, ts := range c.remaint { - expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) - } - - expss := newMockSeriesSet([]storage.Series{ - storage.NewListSeries(labels.FromStrings("a", "b"), expSamples), - }) - - if len(expSamples) == 0 { - require.False(t, res.Next()) - continue - } - - for { - eok, rok := expss.Next(), res.Next() - require.Equal(t, eok, rok) - - if !eok { - require.Empty(t, res.Warnings()) - continue Outer + t.Run("", func(t *testing.T) { + // TODO(gouthamve): Reset the tombstones somehow. + // Delete the ranges. + for _, r := range c.intervals { + require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))) } - sexp := expss.At() - sres := res.At() - require.Equal(t, sexp.Labels(), sres.Labels()) + // create snapshot + snap := t.TempDir() - smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil) - smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil) + require.NoError(t, db.Snapshot(snap, true)) - require.Equal(t, errExp, errRes) - require.Equal(t, smplExp, smplRes) - } + // reopen DB from snapshot + newDB, err := Open(snap, nil, nil, nil, nil) + require.NoError(t, err) + defer func() { require.NoError(t, newDB.Close()) }() + + // Compare the result. 
+ q, err := newDB.Querier(0, numSamples) + require.NoError(t, err) + defer func() { require.NoError(t, q.Close()) }() + + res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) + + expSamples := make([]chunks.Sample, 0, len(c.remaint)) + for _, ts := range c.remaint { + expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil}) + } + + expss := newMockSeriesSet([]storage.Series{ + storage.NewListSeries(labels.FromStrings("a", "b"), expSamples), + }) + + if len(expSamples) == 0 { + require.False(t, res.Next()) + return + } + + for { + eok, rok := expss.Next(), res.Next() + require.Equal(t, eok, rok) + + if !eok { + require.Empty(t, res.Warnings()) + break + } + sexp := expss.At() + sres := res.At() + + require.Equal(t, sexp.Labels(), sres.Labels()) + + smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil) + smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil) + + require.Equal(t, errExp, errRes) + require.Equal(t, smplExp, smplRes) + } + }) } } @@ -2250,49 +2252,51 @@ func TestDB_LabelNames(t *testing.T) { require.NoError(t, err) } for _, tst := range tests { - ctx := context.Background() - db := openTestDB(t, nil, nil) - defer func() { - require.NoError(t, db.Close()) - }() + t.Run("", func(t *testing.T) { + ctx := context.Background() + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() - appendSamples(db, 0, 4, tst.sampleLabels1) + appendSamples(db, 0, 4, tst.sampleLabels1) - // Testing head. - headIndexr, err := db.head.Index() - require.NoError(t, err) - labelNames, err := headIndexr.LabelNames(ctx) - require.NoError(t, err) - require.Equal(t, tst.exp1, labelNames) - require.NoError(t, headIndexr.Close()) - - // Testing disk. - err = db.Compact(ctx) - require.NoError(t, err) - // All blocks have same label names, hence check them individually. - // No need to aggregate and check. - for _, b := range db.Blocks() { - blockIndexr, err := b.Index() + // Testing head. 
+ headIndexr, err := db.head.Index() require.NoError(t, err) - labelNames, err = blockIndexr.LabelNames(ctx) + labelNames, err := headIndexr.LabelNames(ctx) require.NoError(t, err) require.Equal(t, tst.exp1, labelNames) - require.NoError(t, blockIndexr.Close()) - } + require.NoError(t, headIndexr.Close()) - // Adding more samples to head with new label names - // so that we can test (head+disk).LabelNames(ctx) (the union). - appendSamples(db, 5, 9, tst.sampleLabels2) + // Testing disk. + err = db.Compact(ctx) + require.NoError(t, err) + // All blocks have same label names, hence check them individually. + // No need to aggregate and check. + for _, b := range db.Blocks() { + blockIndexr, err := b.Index() + require.NoError(t, err) + labelNames, err = blockIndexr.LabelNames(ctx) + require.NoError(t, err) + require.Equal(t, tst.exp1, labelNames) + require.NoError(t, blockIndexr.Close()) + } - // Testing DB (union). - q, err := db.Querier(math.MinInt64, math.MaxInt64) - require.NoError(t, err) - var ws annotations.Annotations - labelNames, ws, err = q.LabelNames(ctx, nil) - require.NoError(t, err) - require.Empty(t, ws) - require.NoError(t, q.Close()) - require.Equal(t, tst.exp2, labelNames) + // Adding more samples to head with new label names + // so that we can test (head+disk).LabelNames(ctx) (the union). + appendSamples(db, 5, 9, tst.sampleLabels2) + + // Testing DB (union). 
+ q, err := db.Querier(math.MinInt64, math.MaxInt64) + require.NoError(t, err) + var ws annotations.Annotations + labelNames, ws, err = q.LabelNames(ctx, nil) + require.NoError(t, err) + require.Empty(t, ws) + require.NoError(t, q.Close()) + require.Equal(t, tst.exp2, labelNames) + }) } } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 2d83b04ba3..9a7743ec12 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2633,42 +2633,42 @@ func BenchmarkSetMatcher(b *testing.B) { } for _, c := range cases { - dir := b.TempDir() - - var ( - blocks []*Block - prefilledLabels []map[string]string - generatedSeries []storage.Series - ) - for i := int64(0); i < int64(c.numBlocks); i++ { - mint := i * int64(c.numSamplesPerSeriesPerBlock) - maxt := mint + int64(c.numSamplesPerSeriesPerBlock) - 1 - if len(prefilledLabels) == 0 { - generatedSeries = genSeries(c.numSeries, 10, mint, maxt) - for _, s := range generatedSeries { - prefilledLabels = append(prefilledLabels, s.Labels().Map()) - } - } else { - generatedSeries = populateSeries(prefilledLabels, mint, maxt) - } - block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil) - require.NoError(b, err) - blocks = append(blocks, block) - defer block.Close() - } - - qblocks := make([]storage.Querier, 0, len(blocks)) - for _, blk := range blocks { - q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64) - require.NoError(b, err) - qblocks = append(qblocks, q) - } - - sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge) - defer sq.Close() - benchMsg := fmt.Sprintf("nSeries=%d,nBlocks=%d,cardinality=%d,pattern=\"%s\"", c.numSeries, c.numBlocks, c.cardinality, c.pattern) b.Run(benchMsg, func(b *testing.B) { + dir := b.TempDir() + + var ( + blocks []*Block + prefilledLabels []map[string]string + generatedSeries []storage.Series + ) + for i := int64(0); i < int64(c.numBlocks); i++ { + mint := i * int64(c.numSamplesPerSeriesPerBlock) + maxt := mint + 
int64(c.numSamplesPerSeriesPerBlock) - 1 + if len(prefilledLabels) == 0 { + generatedSeries = genSeries(c.numSeries, 10, mint, maxt) + for _, s := range generatedSeries { + prefilledLabels = append(prefilledLabels, s.Labels().Map()) + } + } else { + generatedSeries = populateSeries(prefilledLabels, mint, maxt) + } + block, err := OpenBlock(nil, createBlock(b, dir, generatedSeries), nil, nil) + require.NoError(b, err) + blocks = append(blocks, block) + defer block.Close() + } + + qblocks := make([]storage.Querier, 0, len(blocks)) + for _, blk := range blocks { + q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64) + require.NoError(b, err) + qblocks = append(qblocks, q) + } + + sq := storage.NewMergeQuerier(qblocks, nil, storage.ChainedSeriesMerge) + defer sq.Close() + b.ResetTimer() b.ReportAllocs() for n := 0; n < b.N; n++ { From 6ba25ba93fc9c6f0b1eebed7f30c476f90eca1d6 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 27 Nov 2023 15:17:08 +0000 Subject: [PATCH 1298/1397] tsdb tests: avoid 'defer' till end of function 'defer' only runs at the end of the function, so explicitly close the querier after we finish with it. Also check it didn't error. Signed-off-by: Bryan Boreham --- tsdb/head_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 748922ac6b..33b54a756f 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -1594,7 +1594,6 @@ func TestDelete_e2e(t *testing.T) { for i := 0; i < numRanges; i++ { q, err := NewBlockQuerier(hb, 0, 100000) require.NoError(t, err) - defer q.Close() ss := q.Select(context.Background(), true, nil, del.ms...) // Build the mockSeriesSet. 
matchedSeries := make([]storage.Series, 0, len(matched)) @@ -1635,6 +1634,7 @@ func TestDelete_e2e(t *testing.T) { } require.NoError(t, ss.Err()) require.Empty(t, ss.Warnings()) + require.NoError(t, q.Close()) } } } From 21afc0beb41458dc423a3319698c5a07ba12147b Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Tue, 28 Jan 2025 02:05:30 +0530 Subject: [PATCH 1299/1397] adds tests for sum_over_time and avg_over_time Signed-off-by: Neeraj Gartia --- promql/promqltest/testdata/functions.test | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index fadd29c53c..23d0101566 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -810,6 +810,8 @@ load 10s metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 metric10 -9.988465674311579e+307 9.988465674311579e+307 metric11 1 2 3 NaN NaN + metric12 1 2 3 {{schema:0 sum:1 count:1}} {{schema:0 sum:3 count:3}} + metric13 {{schema:0 sum:1 count:1}}x5 eval instant at 55s avg_over_time(metric[1m]) {} 3 @@ -916,6 +918,23 @@ eval instant at 1m avg_over_time(metric11[1m]) eval instant at 1m sum_over_time(metric11[1m])/count_over_time(metric11[1m]) {} NaN +# Tests for samples with mix of floats and histograms. +eval_warn instant at 1m sum_over_time(metric12[1m]) + # no result. + +eval_warn instant at 1m avg_over_time(metric12[1m]) + # no result. + +# Tests for samples with only histograms. +eval instant at 1m sum_over_time(metric13[1m]) + {} {{schema:0 sum:5 count:5}} + +eval instant at 1m avg_over_time(metric13[1m]) + {} {{schema:0 sum:1 count:1}} + +eval instant at 1m sum_over_time(metric13[1m])/count_over_time(metric13[1m]) + {} {{schema:0 sum:1 count:1}} + # Test if very big intermediate values cause loss of detail. 
clear load 10s From b9fcc81695eae37169afb90c05865e48a8feb6f4 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Tue, 28 Jan 2025 02:10:36 +0530 Subject: [PATCH 1300/1397] adds tests for timestamp() Signed-off-by: Neeraj Gartia --- promql/promqltest/testdata/functions.test | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 23d0101566..9bcb7e6945 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -419,7 +419,7 @@ clear # Tests for vector, time and timestamp. load 10s - metric 1 1 + metric 1 1 {{schema:0 sum:1 count:1}} eval instant at 0s timestamp(metric) {} 0 @@ -436,6 +436,9 @@ eval instant at 10s timestamp(metric) eval instant at 10s timestamp(((metric))) {} 10 +eval instant at 20s timestamp(metric) + {} 20 + # Tests for label_join. load 5m testmetric{src="a",src1="b",src2="c",dst="original-destination-value"} 0 From 9d1abbb9ed15df50bb094b8251cbe7bb4fa14755 Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Tue, 28 Jan 2025 03:11:58 -0800 Subject: [PATCH 1301/1397] Call PostCreation callback only after the new series is added to the mempotings (#15579) Signed-off-by: alanprot --- tsdb/head.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index 47f85d7713..4fbb4b5710 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -1730,6 +1730,11 @@ func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labe h.numSeries.Inc() h.postings.Add(storage.SeriesRef(id), lset) + + // Adding the series in the postings marks the creation of series + // as any further calls to this and the read methods would return that series. + h.series.postCreation(lset) + return s, true, nil } @@ -2037,9 +2042,6 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu // The callback prevented creation of series. 
return nil, false, preCreationErr } - // Setting the series in the s.hashes marks the creation of series - // as any further calls to this methods would return that series. - s.seriesLifecycleCallback.PostCreation(series.labels()) i = uint64(series.ref) & uint64(s.size-1) @@ -2050,6 +2052,10 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu return series, true, nil } +func (s *stripeSeries) postCreation(lset labels.Labels) { + s.seriesLifecycleCallback.PostCreation(lset) +} + type sample struct { t int64 f float64 From 96ef262fe230e7b1ea68eaa9c3d604bcaafdc8d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 14:58:56 +0100 Subject: [PATCH 1302/1397] chore(deps-dev): bump eslint-plugin-prettier from 4.2.1 to 5.2.1 in /web/ui/react-app (#15605) * chore(deps-dev): bump eslint-plugin-prettier in /web/ui/react-app Bumps [eslint-plugin-prettier](https://github.com/prettier/eslint-plugin-prettier) from 4.2.1 to 5.2.1. - [Release notes](https://github.com/prettier/eslint-plugin-prettier/releases) - [Changelog](https://github.com/prettier/eslint-plugin-prettier/blob/master/CHANGELOG.md) - [Commits](https://github.com/prettier/eslint-plugin-prettier/compare/v4.2.1...v5.2.1) --- updated-dependencies: - dependency-name: eslint-plugin-prettier dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * chore(deps-dev): bump prettier in /web/ui/react-app Signed-off-by: SuperQ * Add @babel/plugin-proposal-private-property-in-object to react-app deps. Signed-off-by: SuperQ * Add missing trailing comma in react-app. 
Signed-off-by: SuperQ --------- Signed-off-by: dependabot[bot] Signed-off-by: SuperQ Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: SuperQ --- web/ui/react-app/package-lock.json | 94 +++++++++++++++---- web/ui/react-app/package.json | 5 +- .../react-app/src/pages/graph/DataTable.tsx | 2 +- 3 files changed, 81 insertions(+), 20 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index c17bf60abf..1d21be832a 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -48,6 +48,7 @@ "tempusdominus-core": "^5.19.3" }, "devDependencies": { + "@babel/plugin-proposal-private-property-in-object": "^7.21.11", "@testing-library/react-hooks": "^8.0.1", "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", @@ -65,11 +66,11 @@ "enzyme-to-json": "^3.6.2", "eslint-config-prettier": "^9.1.0", "eslint-config-react-app": "^7.0.1", - "eslint-plugin-prettier": "^4.2.1", + "eslint-plugin-prettier": "^5.2.1", "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "mutationobserver-shim": "^0.3.7", - "prettier": "^2.8.8", + "prettier": "^3.0.0", "react-scripts": "^5.0.1", "sinon": "^19.0.2", "ts-jest": "^29.2.5" @@ -774,10 +775,17 @@ } }, "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0-placeholder-for-preset-env.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", - "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "version": "7.21.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.11.tgz", + "integrity": "sha512-0QZ8qP/3RLDVBwBFoWAwCtgcDZJVwA5LUJRZU8x2YFfKNuFq161wK3cuGrALu5yiPu+vzwTAg/sMWVNeWeNyaw==", + "deprecated": "This 
proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-private-property-in-object instead.", "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-create-class-features-plugin": "^7.21.0", + "@babel/helper-plugin-utils": "^7.20.2", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, "engines": { "node": ">=6.9.0" }, @@ -2101,6 +2109,18 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/preset-env/node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "dev": true, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/preset-env/node_modules/semver": { "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", @@ -4593,6 +4613,19 @@ "node": ">=14" } }, + "node_modules/@pkgr/core": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz", + "integrity": "sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, "node_modules/@pmmmwh/react-refresh-webpack-plugin": { "version": "0.5.11", "resolved": "https://registry.npmjs.org/@pmmmwh/react-refresh-webpack-plugin/-/react-refresh-webpack-plugin-0.5.11.tgz", @@ -9551,21 +9584,31 @@ } }, "node_modules/eslint-plugin-prettier": { - "version": "4.2.1", - "resolved": 
"https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-4.2.1.tgz", - "integrity": "sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==", + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz", + "integrity": "sha512-gH3iR3g4JfF+yYPaJYkN7jEl9QbweL/YfkoRlNnuIEHEz1vHVlCmWOS+eGGiRuzHQXdJFCOTxRgvju9b8VUmrw==", "dev": true, + "license": "MIT", "dependencies": { - "prettier-linter-helpers": "^1.0.0" + "prettier-linter-helpers": "^1.0.0", + "synckit": "^0.9.1" }, "engines": { - "node": ">=12.0.0" + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint-plugin-prettier" }, "peerDependencies": { - "eslint": ">=7.28.0", - "prettier": ">=2.0.0" + "@types/eslint": ">=8.0.0", + "eslint": ">=8.0.0", + "eslint-config-prettier": "*", + "prettier": ">=3.0.0" }, "peerDependenciesMeta": { + "@types/eslint": { + "optional": true + }, "eslint-config-prettier": { "optional": true } @@ -18630,15 +18673,15 @@ } }, "node_modules/prettier": { - "version": "2.8.8", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", - "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", + "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", "dev": true, "bin": { - "prettier": "bin-prettier.js" + "prettier": "bin/prettier.cjs" }, "engines": { - "node": ">=10.13.0" + "node": ">=14" }, "funding": { "url": "https://github.com/prettier/prettier?sponsor=1" @@ -22360,6 +22403,23 @@ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", "dev": true }, + "node_modules/synckit": { + "version": "0.9.2", + "resolved": 
"https://registry.npmjs.org/synckit/-/synckit-0.9.2.tgz", + "integrity": "sha512-vrozgXDQwYO72vHjUb/HnFbQx1exDjoKzqx23aXEg2a9VIg2TSFZ8FmeZpTjUCFMYw7mpX4BE2SFu8wI7asYsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@pkgr/core": "^0.1.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, "node_modules/tailwindcss": { "version": "3.4.3", "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.3.tgz", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index d126e85a79..34e63f395d 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -65,6 +65,7 @@ "not op_mini all" ], "devDependencies": { + "@babel/plugin-proposal-private-property-in-object": "^7.21.11", "@testing-library/react-hooks": "^8.0.1", "@types/enzyme": "^3.10.18", "@types/flot": "0.0.36", @@ -82,11 +83,11 @@ "enzyme-to-json": "^3.6.2", "eslint-config-prettier": "^9.1.0", "eslint-config-react-app": "^7.0.1", - "eslint-plugin-prettier": "^4.2.1", + "eslint-plugin-prettier": "^5.2.1", "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "mutationobserver-shim": "^0.3.7", - "prettier": "^2.8.8", + "prettier": "^3.0.0", "react-scripts": "^5.0.1", "sinon": "^19.0.2", "ts-jest": "^29.2.5" diff --git a/web/ui/react-app/src/pages/graph/DataTable.tsx b/web/ui/react-app/src/pages/graph/DataTable.tsx index add28f182b..c783a22264 100644 --- a/web/ui/react-app/src/pages/graph/DataTable.tsx +++ b/web/ui/react-app/src/pages/graph/DataTable.tsx @@ -205,7 +205,7 @@ export const bucketRangeString = ([boundaryRule, leftBoundary, rightBoundary, _] number, string, string, - string + string, ]): string => { return `${leftDelim(boundaryRule)}${leftBoundary} -> ${rightBoundary}${rightDelim(boundaryRule)}`; }; From d8eab3f1c88cd0e472b000adf7490d3076efe1ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> 
Date: Tue, 28 Jan 2025 15:00:28 +0100 Subject: [PATCH 1303/1397] chore(deps): bump the go-opentelemetry-io group with 14 updates (#15875) Bumps the go-opentelemetry-io group with 14 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/collector/component](https://github.com/open-telemetry/opentelemetry-collector) | `0.116.0` | `0.118.0` | | [go.opentelemetry.io/collector/consumer](https://github.com/open-telemetry/opentelemetry-collector) | `1.22.0` | `1.24.0` | | [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) | `1.22.0` | `1.24.0` | | [go.opentelemetry.io/collector/processor](https://github.com/open-telemetry/opentelemetry-collector) | `0.116.0` | `0.118.0` | | [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector) | `0.116.0` | `0.118.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.58.0` | `0.59.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.58.0` | `0.59.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | | [go.opentelemetry.io/otel/metric](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | | 
[go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.33.0` | `1.34.0` | Updates `go.opentelemetry.io/collector/component` from 0.116.0 to 0.118.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.116.0...v0.118.0) Updates `go.opentelemetry.io/collector/consumer` from 1.22.0 to 1.24.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.22.0...pdata/v1.24.0) Updates `go.opentelemetry.io/collector/pdata` from 1.22.0 to 1.24.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.22.0...pdata/v1.24.0) Updates `go.opentelemetry.io/collector/processor` from 0.116.0 to 0.118.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.116.0...v0.118.0) Updates `go.opentelemetry.io/collector/semconv` from 0.116.0 to 0.118.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.116.0...v0.118.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace` 
from 0.58.0 to 0.59.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.58.0...zpages/v0.59.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.58.0 to 0.59.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.58.0...zpages/v0.59.0) Updates `go.opentelemetry.io/otel` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - 
[Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) Updates `go.opentelemetry.io/otel/metric` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) Updates `go.opentelemetry.io/otel/sdk` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) Updates `go.opentelemetry.io/otel/trace` from 1.33.0 to 1.34.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...v1.34.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/component dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/consumer dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/processor dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: 
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/metric dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 50 +++++++++++----------- go.sum | 132 ++++++++++++++++++++++++++++----------------------------- 2 files changed, 91 insertions(+), 91 deletions(-) diff --git a/go.mod b/go.mod index edac2e9d55..0cbb74a454 100644 --- a/go.mod +++ b/go.mod @@ -61,33 +61,33 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/component v0.116.0 - go.opentelemetry.io/collector/consumer v1.22.0 - go.opentelemetry.io/collector/pdata v1.22.0 - go.opentelemetry.io/collector/processor v0.116.0 - go.opentelemetry.io/collector/semconv v0.116.0 - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 - go.opentelemetry.io/otel v1.33.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 - go.opentelemetry.io/otel/metric v1.33.0 - go.opentelemetry.io/otel/sdk v1.33.0 - go.opentelemetry.io/otel/trace v1.33.0 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/consumer v1.24.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/processor v0.118.0 + go.opentelemetry.io/collector/semconv v0.118.0 + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 + go.opentelemetry.io/otel v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 + go.opentelemetry.io/otel/metric v1.34.0 + 
go.opentelemetry.io/otel/sdk v1.34.0 + go.opentelemetry.io/otel/trace v1.34.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.25.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 golang.org/x/text v0.21.0 golang.org/x/tools v0.28.0 google.golang.org/api v0.216.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f google.golang.org/grpc v1.69.4 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.3 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.3 @@ -144,7 +144,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -191,17 +191,17 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.116.0 // indirect - go.opentelemetry.io/collector/pipeline v0.116.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.32.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net 
v0.33.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 0d393c0dae..879fe3f124 100644 --- a/go.sum +++ b/go.sum @@ -225,8 +225,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ= github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= @@ -510,60 +510,60 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/component v0.116.0 h1:SQE1YeVfYCN7bw1n4hknUwJE5U/1qJL552sDhAdSlaA= -go.opentelemetry.io/collector/component v0.116.0/go.mod h1:MYgXFZWDTq0uPgF1mkLSFibtpNqksRVAOrmihckOQEs= -go.opentelemetry.io/collector/component/componentstatus v0.116.0 h1:wpgY0H2K9IPBzaNAvavKziK86VZ7TuNFQbS9OC4Z6Cs= -go.opentelemetry.io/collector/component/componentstatus v0.116.0/go.mod h1:ZRlVwHFMGNfcsAywEJqivOn5JzDZkpe3KZVSwMWu4tw= -go.opentelemetry.io/collector/component/componenttest v0.116.0 h1:UIcnx4Rrs/oDRYSAZNHRMUiYs2FBlwgV5Nc0oMYfR6A= -go.opentelemetry.io/collector/component/componenttest v0.116.0/go.mod h1:W40HaKPHdBFMVI7zzHE7dhdWC+CgAnAC9SmWetFBATY= -go.opentelemetry.io/collector/config/configtelemetry v0.116.0 h1:Vl49VCHQwBOeMswDpFwcl2HD8e9y94xlrfII3SR2VeQ= -go.opentelemetry.io/collector/config/configtelemetry v0.116.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w= go.opentelemetry.io/collector/confmap v1.22.0/go.mod 
h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/consumer v1.22.0 h1:QmfnNizyNZFt0uK3GG/EoT5h6PvZJ0dgVTc5hFEc1l0= -go.opentelemetry.io/collector/consumer v1.22.0/go.mod h1:tiz2khNceFAPokxxfzAuFfIpShBasMT2AL2Sbc7+m0I= -go.opentelemetry.io/collector/consumer/consumertest v0.116.0 h1:pIVR7FtQMNAzfxBUSMEIC2dX5Lfo3O9ZBfx+sAwrrrM= -go.opentelemetry.io/collector/consumer/consumertest v0.116.0/go.mod h1:cV3cNDiPnls5JdhnOJJFVlclrClg9kPs04cXgYP9Gmk= -go.opentelemetry.io/collector/consumer/xconsumer v0.116.0 h1:ZrWvq7HumB0jRYmS2ztZ3hhXRNpUVBWPKMbPhsVGmZM= -go.opentelemetry.io/collector/consumer/xconsumer v0.116.0/go.mod h1:C+VFMk8vLzPun6XK8aMts6h4RaDjmzXHCPaiOxzRQzQ= -go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= -go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= -go.opentelemetry.io/collector/pdata/pprofile v0.116.0 h1:iE6lqkO7Hi6lTIIml1RI7yQ55CKqW12R2qHinwF5Zuk= -go.opentelemetry.io/collector/pdata/pprofile v0.116.0/go.mod h1:xQiPpjzIiXRFb+1fPxUy/3ygEZgo0Bu/xmLKOWu8vMQ= -go.opentelemetry.io/collector/pdata/testdata v0.116.0 h1:zmn1zpeX2BvzL6vt2dBF4OuAyFF2ml/OXcqflNgFiP0= -go.opentelemetry.io/collector/pdata/testdata v0.116.0/go.mod h1:ytWzICFN4XTDP6o65B4+Ed52JGdqgk9B8CpLHCeCpMo= -go.opentelemetry.io/collector/pipeline v0.116.0 h1:o8eKEuWEszmRpfShy7ElBoQ3Jo6kCi9ucm3yRgdNb9s= -go.opentelemetry.io/collector/pipeline v0.116.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/processor v0.116.0 h1:Kyu4tPzTdWNHtZjcxvI/bGNAgyv8L8Kem2r/Mk4IDAw= -go.opentelemetry.io/collector/processor v0.116.0/go.mod h1:+/Ugy48RAxlZEXmN2cw51W8t5wdHS9No+GAoP+moskk= -go.opentelemetry.io/collector/processor/processortest v0.116.0 h1:+IqNEVEE0E2MsO2g7+Y/9dz35sDuvAXRXrLts9NdXrA= -go.opentelemetry.io/collector/processor/processortest v0.116.0/go.mod h1:DLaQDBxzgeeaUO0ULMn/efos9PmHZkmYCHuxwCsiVHI= 
-go.opentelemetry.io/collector/processor/xprocessor v0.116.0 h1:iin/UwuWvSLB7ZNfINFUYbZ5lxIi1NjZ2brkyyFdiRA= -go.opentelemetry.io/collector/processor/xprocessor v0.116.0/go.mod h1:cnA43/XpKDbaOmd8buqKp/LGJ2l/OoCqbR//u5DMfn8= -go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= -go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod 
h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 
h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 h1:iQZYNQ7WwIcYXzOPR46FQv9O0dS1PW16RjvR0TjDOe8= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0/go.mod h1:54CaSNqYEXvpzDh8KPjiMVoWm60t5R0dZRt0leEPgAs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod 
h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -580,8 +580,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= @@ -608,8 +608,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= @@ -653,11 +653,11 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -689,10 +689,10 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= -google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= 
+google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -700,8 +700,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From bb30a871aca2e6a8f324f663fd55935298fc7a40 Mon Sep 17 00:00:00 2001 From: Pierre Prinetti Date: Mon, 9 Dec 2024 16:23:36 +0100 Subject: [PATCH 1304/1397] deps: Use Gophercloud v2 Signed-off-by: Pierre Prinetti 
--- discovery/openstack/hypervisor.go | 13 ++++++------- discovery/openstack/instance.go | 21 ++++++++++----------- discovery/openstack/loadbalancer.go | 19 +++++++++---------- discovery/openstack/openstack.go | 4 ++-- go.mod | 2 +- go.sum | 8 ++------ 6 files changed, 30 insertions(+), 37 deletions(-) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index ec127b1861..5cea68c4a5 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -20,10 +20,10 @@ import ( "net" "strconv" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" - "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors" + "github.com/gophercloud/gophercloud/v2/pagination" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -59,8 +59,7 @@ func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercl } func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - h.provider.Context = ctx - err := openstack.Authenticate(h.provider, *h.authOpts) + err := openstack.Authenticate(ctx, h.provider, *h.authOpts) if err != nil { return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) } @@ -78,7 +77,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details pagerHypervisors := hypervisors.List(client, nil) - err = pagerHypervisors.EachPage(func(page pagination.Page) (bool, error) { + err = pagerHypervisors.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { hypervisorList, err := 
hypervisors.ExtractHypervisors(page) if err != nil { return false, fmt.Errorf("could not extract hypervisors: %w", err) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index f25c19badb..dea327afe3 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -20,12 +20,12 @@ import ( "net" "strconv" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" + "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/v2/pagination" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" @@ -78,8 +78,7 @@ type floatingIPKey struct { } func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - i.provider.Context = ctx - err := openstack.Authenticate(i.provider, *i.authOpts) + err := openstack.Authenticate(ctx, i.provider, *i.authOpts) if err != nil { return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) } @@ -100,7 +99,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, // OpenStack API reference // https://docs.openstack.org/api-ref/network/v2/index.html#list-ports - portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages() + portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages(ctx) if err != nil { return nil, fmt.Errorf("failed to list all ports: %w", err) } @@ -120,7 
+119,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{}) floatingIPList := make(map[floatingIPKey]string) floatingIPPresent := make(map[string]struct{}) - err = pagerFIP.EachPage(func(page pagination.Page) (bool, error) { + err = pagerFIP.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { result, err := floatingips.ExtractFloatingIPs(page) if err != nil { return false, fmt.Errorf("could not extract floatingips: %w", err) @@ -161,7 +160,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, tg := &targetgroup.Group{ Source: "OS_" + i.region, } - err = pager.EachPage(func(page pagination.Page) (bool, error) { + err = pager.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { if ctx.Err() != nil { return false, fmt.Errorf("could not extract instances: %w", ctx.Err()) } diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go index 469fe84275..32e0f24f8d 100644 --- a/discovery/openstack/loadbalancer.go +++ b/discovery/openstack/loadbalancer.go @@ -21,11 +21,11 @@ import ( "strconv" "strings" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" - "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/v2" + "github.com/gophercloud/gophercloud/v2/openstack" + "github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" @@ -67,8 +67,7 @@ 
func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gopher } func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - i.provider.Context = ctx - err := openstack.Authenticate(i.provider, *i.authOpts) + err := openstack.Authenticate(ctx, i.provider, *i.authOpts) if err != nil { return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err) } @@ -87,7 +86,7 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro return nil, fmt.Errorf("could not create OpenStack network session: %w", err) } - allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages() + allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages(ctx) if err != nil { return nil, fmt.Errorf("failed to list load balancers: %w", err) } @@ -98,7 +97,7 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro } // Fetch all listeners in one API call - listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages() + listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages(ctx) if err != nil { return nil, fmt.Errorf("failed to list all listeners: %w", err) } @@ -118,7 +117,7 @@ func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Gro } // Fetch all floating IPs - fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages() + fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages(ctx) if err != nil { return nil, fmt.Errorf("failed to list all fips: %w", err) } diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index e1dba211cf..eb1d4d5a4d 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -21,8 +21,8 @@ import ( "net/http" "time" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/v2" + 
"github.com/gophercloud/gophercloud/v2/openstack" "github.com/mwitkow/go-conntrack" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" diff --git a/go.mod b/go.mod index 0cbb74a454..4cb460fc15 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad github.com/google/uuid v1.6.0 - github.com/gophercloud/gophercloud v1.14.1 + github.com/gophercloud/gophercloud/v2 v2.4.0 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.31.0 diff --git a/go.sum b/go.sum index 879fe3f124..4e7cae9e01 100644 --- a/go.sum +++ b/go.sum @@ -217,8 +217,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gT github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= -github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= -github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg= +github.com/gophercloud/gophercloud/v2 v2.4.0/go.mod h1:uJWNpTgJPSl2gyzJqcU/pIAhFUWvIkp8eE8M15n9rs4= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= @@ -579,7 +579,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod 
h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -607,7 +606,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -642,8 +640,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 0aaa9cb9f8aea0212e3d9c55cb7cfec1e19b4d78 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:11:02 +0100 Subject: [PATCH 1305/1397] chore(deps): bump github.com/linode/linodego from 1.44.0 to 1.46.0 (#15888) Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.44.0 to 1.46.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.44.0...v1.46.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 0cbb74a454..e55d57e7bf 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.44.0 + github.com/linode/linodego v1.46.0 github.com/miekg/dns v1.1.62 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -132,7 +132,7 @@ require ( github.com/go-openapi/spec v0.20.14 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.23.0 // indirect - github.com/go-resty/resty/v2 v2.16.2 // indirect + github.com/go-resty/resty/v2 v2.16.3 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/glog v1.2.2 // indirect diff --git a/go.sum b/go.sum index 879fe3f124..5b3733ec05 100644 --- a/go.sum +++ b/go.sum @@ -160,8 +160,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= -github.com/go-resty/resty/v2 v2.16.2 h1:CpRqTjIzq/rweXUt9+GxzzQdlkqMdt8Lm/fuK/CAbAg= -github.com/go-resty/resty/v2 v2.16.2/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU= +github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E= +github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -325,8 +325,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.44.0 h1:JZLLWzCAx3CmHSV9NmCoXisuqKtrmPhfY9MrgvaHMUY= -github.com/linode/linodego v1.44.0/go.mod h1:umdoNOmtbqAdGQbmQnPFZ2YS4US+/mU/1bA7MjoKAvg= +github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU= +github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= From d5a93ed0db5b85b90c45b3f3ee9e3036de3f3843 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:12:02 +0100 Subject: [PATCH 1306/1397] chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity (#15882) Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.8.0 to 1.8.1. 
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.8.0...sdk/azidentity/v1.8.1) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azidentity dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index e55d57e7bf..c289bbe1c3 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.22.7 toolchain go1.23.4 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 github.com/Code-Hex/go-generics-cache v1.5.1 @@ -102,7 +102,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect diff --git a/go.sum b/go.sum index 5b3733ec05..575d03de93 100644 --- a/go.sum +++ b/go.sum @@ -6,12 +6,12 @@ cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyT 
cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= @@ -26,8 +26,8 @@ github.com/Azure/go-ansiterm 
v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= @@ -459,8 +459,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q= github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= From 319211372401bfbeb4c6bd8061d749ea87edb37d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:12:50 +0100 Subject: [PATCH 1307/1397] chore(deps): bump github.com/envoyproxy/protoc-gen-validate (#15877) Bumps [github.com/envoyproxy/protoc-gen-validate](https://github.com/envoyproxy/protoc-gen-validate) from 1.1.0 to 1.2.1. - [Release notes](https://github.com/envoyproxy/protoc-gen-validate/releases) - [Changelog](https://github.com/bufbuild/protoc-gen-validate/blob/main/.goreleaser.yaml) - [Commits](https://github.com/envoyproxy/protoc-gen-validate/compare/v1.1.0...v1.2.1) --- updated-dependencies: - dependency-name: github.com/envoyproxy/protoc-gen-validate dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c289bbe1c3..fb0a57cd60 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/docker/docker v27.4.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane/envoy v1.32.2 - github.com/envoyproxy/protoc-gen-validate v1.1.0 + github.com/envoyproxy/protoc-gen-validate v1.2.1 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.8.0 github.com/go-openapi/strfmt v0.23.0 diff --git a/go.sum b/go.sum index 575d03de93..548a72924d 100644 --- a/go.sum +++ b/go.sum @@ -113,8 +113,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane/envoy v1.32.2 h1:zidqwmijfcbyKqVxjQDFx042PgX+p9U+/fu/f9VtSk8= github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= -github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= From c242a53f539cc171bad901404d6711304c1d2002 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:03:52 +0000 Subject: [PATCH 1308/1397] chore(deps): bump github.com/miekg/dns from 1.1.62 to 1.1.63 Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.62 to 1.1.63. - [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.62...v1.1.63) --- updated-dependencies: - dependency-name: github.com/miekg/dns dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fb0a57cd60..66c8dfae61 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linode/linodego v1.46.0 - github.com/miekg/dns v1.1.62 + github.com/miekg/dns v1.1.63 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 diff --git a/go.sum b/go.sum index 548a72924d..3afebd3cb1 100644 --- a/go.sum +++ b/go.sum @@ -353,8 +353,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= +github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= github.com/mitchellh/cli v1.1.0/go.mod 
h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= From 9f9f4f059747f9dcaf17ca631fb026b3e4196676 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 16:19:10 +0100 Subject: [PATCH 1309/1397] chore(deps): bump @fortawesome/fontawesome-svg-core from 6.5.2 to 6.7.2 in /web/ui/react-app (#15755) * chore(deps): bump @fortawesome/fontawesome-svg-core in /web/ui/react-app Bumps [@fortawesome/fontawesome-svg-core](https://github.com/FortAwesome/Font-Awesome) from 6.5.2 to 6.7.2. - [Release notes](https://github.com/FortAwesome/Font-Awesome/releases) - [Changelog](https://github.com/FortAwesome/Font-Awesome/blob/6.x/CHANGELOG.md) - [Commits](https://github.com/FortAwesome/Font-Awesome/compare/6.5.2...6.7.2) --- updated-dependencies: - dependency-name: "@fortawesome/fontawesome-svg-core" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * chore(deps): bump @fortawesome/free-solid-svg-icons in /web/ui/react-app Signed-off-by: SuperQ --------- Signed-off-by: dependabot[bot] Signed-off-by: SuperQ Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: SuperQ --- web/ui/react-app/package-lock.json | 32 ++++++++++++------------------ web/ui/react-app/package.json | 4 ++-- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 1d21be832a..2b4dcfea61 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -16,8 +16,8 @@ "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.36.1", "@forevolve/bootstrap-dark": "^4.0.2", - "@fortawesome/fontawesome-svg-core": "6.5.2", - "@fortawesome/free-solid-svg-icons": "6.5.2", + "@fortawesome/fontawesome-svg-core": "6.7.2", + "@fortawesome/free-solid-svg-icons": "6.7.2", "@fortawesome/react-fontawesome": "0.2.2", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", @@ -2732,36 +2732,30 @@ } }, "node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.5.2.tgz", - "integrity": "sha512-gBxPg3aVO6J0kpfHNILc+NMhXnqHumFxOmjYCFfOiLZfwhnnfhtsdA2hfJlDnj+8PjAs6kKQPenOTKj3Rf7zHw==", - "hasInstallScript": true, - "license": "MIT", + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.7.2.tgz", + "integrity": "sha512-Zs+YeHUC5fkt7Mg1l6XTniei3k4bwG/yo3iFUtZWd/pMx9g3fdvkSK9E0FOC+++phXOka78uJcYb8JaFkW52Xg==", "engines": { "node": ">=6" } }, "node_modules/@fortawesome/fontawesome-svg-core": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.5.2.tgz", - "integrity": 
"sha512-5CdaCBGl8Rh9ohNdxeeTMxIj8oc3KNBgIeLMvJosBMdslK/UnEB8rzyDRrbKdL1kDweqBPo4GT9wvnakHWucZw==", - "hasInstallScript": true, - "license": "MIT", + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.7.2.tgz", + "integrity": "sha512-yxtOBWDrdi5DD5o1pmVdq3WMCvnobT0LU6R8RyyVXPvFRd2o79/0NCuQoCjNTeZz9EzA9xS3JxNWfv54RIHFEA==", "dependencies": { - "@fortawesome/fontawesome-common-types": "6.5.2" + "@fortawesome/fontawesome-common-types": "6.7.2" }, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/free-solid-svg-icons": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.5.2.tgz", - "integrity": "sha512-QWFZYXFE7O1Gr1dTIp+D6UcFUF0qElOnZptpi7PBUMylJh+vFmIedVe1Ir6RM1t2tEQLLSV1k7bR4o92M+uqlw==", - "hasInstallScript": true, - "license": "(CC-BY-4.0 AND MIT)", + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.7.2.tgz", + "integrity": "sha512-GsBrnOzU8uj0LECDfD5zomZJIjrPhIlWU82AHwa2s40FKH+kcxQaBvBo3Z4TxyZHIyX8XTDxsyA33/Vx9eFuQA==", "dependencies": { - "@fortawesome/fontawesome-common-types": "6.5.2" + "@fortawesome/fontawesome-common-types": "6.7.2" }, "engines": { "node": ">=6" diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 34e63f395d..232f229a3b 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -11,8 +11,8 @@ "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.36.1", "@forevolve/bootstrap-dark": "^4.0.2", - "@fortawesome/fontawesome-svg-core": "6.5.2", - "@fortawesome/free-solid-svg-icons": "6.5.2", + "@fortawesome/fontawesome-svg-core": "6.7.2", + "@fortawesome/free-solid-svg-icons": "6.7.2", "@fortawesome/react-fontawesome": "0.2.2", "@lezer/common": "^1.2.3", "@lezer/highlight": "^1.2.1", From e0a6f2224d90db914e9885f68c7f95b5daf43f8e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:32:19 +0100 Subject: [PATCH 1310/1397] chore(deps): bump google.golang.org/api from 0.216.0 to 0.218.0 (#15885) Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.216.0 to 0.218.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.216.0...v0.218.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 555d494ae3..ae286c24e1 100644 --- a/go.mod +++ b/go.mod @@ -84,7 +84,7 @@ require ( golang.org/x/sys v0.29.0 golang.org/x/text v0.21.0 golang.org/x/tools v0.28.0 - google.golang.org/api v0.216.0 + google.golang.org/api v0.218.0 google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f google.golang.org/grpc v1.69.4 google.golang.org/protobuf v1.36.3 @@ -98,8 +98,8 @@ require ( ) require ( - cloud.google.com/go/auth v0.13.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/auth v0.14.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect @@ -140,7 +140,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.8 // 
indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect diff --git a/go.sum b/go.sum index acdaa5698b..925d0efe0e 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= -cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= -cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= -cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/auth v0.14.0 h1:A5C4dKV/Spdvxcl0ggWwWEzzP7AZMJSEIgrkngwhGYM= +cloud.google.com/go/auth v0.14.0/go.mod h1:CYsoRL1PdiDuqeQpZE0bP2pnPrGqFcOkI0nldEQis+A= +cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= +cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= @@ -208,8 +208,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/s2a-go v0.1.8 
h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -678,8 +678,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.216.0 h1:xnEHy+xWFrtYInWPy8OdGFsyIfWJjtVnO39g7pz2BFY= -google.golang.org/api v0.216.0/go.mod h1:K9wzQMvWi47Z9IU7OgdOofvZuw75Ge3PPITImZR/UyI= +google.golang.org/api v0.218.0 h1:x6JCjEWeZ9PFCRe9z0FBrNwj7pB7DOAqT35N+IPnAUA= +google.golang.org/api v0.218.0/go.mod h1:5VGHBAkxrA/8EFjLVEYmMUJ8/8+gWWQ3s4cFH0FxG2M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= From 4aebe745b902d721f6b9a5f93ed5317248eecdcc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:32:45 +0100 Subject: [PATCH 1311/1397] chore(deps): bump google.golang.org/grpc from 1.69.4 to 1.70.0 (#15883) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.69.4 to 1.70.0. 
- [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.69.4...v1.70.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index ae286c24e1..46b238d8a6 100644 --- a/go.mod +++ b/go.mod @@ -86,7 +86,7 @@ require ( golang.org/x/tools v0.28.0 google.golang.org/api v0.218.0 google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f - google.golang.org/grpc v1.69.4 + google.golang.org/grpc v1.70.0 google.golang.org/protobuf v1.36.3 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -135,7 +135,7 @@ require ( github.com/go-resty/resty/v2 v2.16.3 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.2 // indirect + github.com/golang/glog v1.2.3 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect diff --git a/go.sum b/go.sum index 925d0efe0e..dbacaca50e 100644 --- a/go.sum +++ b/go.sum @@ -177,8 +177,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= -github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.3 
h1:oDTdz9f5VGVVNGu/Q7UXKWYsD0873HXLHdJUNBsSEKM= +github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -694,8 +694,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 172028c71d8e3173c4460f5c1b75ffc42bbfc115 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:33:10 +0100 Subject: [PATCH 1312/1397] chore(deps): bump github.com/aws/aws-sdk-go from 1.55.5 to 1.55.6 (#15884) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.55.5 to 1.55.6. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG_PENDING.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.55.5...v1.55.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 46b238d8a6..76582f8051 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/KimMachineGun/automemlimit v0.6.1 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b - github.com/aws/aws-sdk-go v1.55.5 + github.com/aws/aws-sdk-go v1.55.6 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 diff --git a/go.sum b/go.sum index dbacaca50e..db06f12ca9 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 
h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= From 5d8af4e5fbac77e89caa73248de10de48c2168a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:33:38 +0100 Subject: [PATCH 1313/1397] chore(deps): bump github.com/envoyproxy/go-control-plane/envoy (#15880) Bumps [github.com/envoyproxy/go-control-plane/envoy](https://github.com/envoyproxy/go-control-plane) from 1.32.2 to 1.32.3. - [Release notes](https://github.com/envoyproxy/go-control-plane/releases) - [Changelog](https://github.com/envoyproxy/go-control-plane/blob/main/CHANGELOG.md) - [Commits](https://github.com/envoyproxy/go-control-plane/compare/envoy/v1.32.2...envoy/v1.32.3) --- updated-dependencies: - dependency-name: github.com/envoyproxy/go-control-plane/envoy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 76582f8051..8569f86642 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/digitalocean/godo v1.132.0 github.com/docker/docker v27.4.1+incompatible github.com/edsrzf/mmap-go v1.2.0 - github.com/envoyproxy/go-control-plane/envoy v1.32.2 + github.com/envoyproxy/go-control-plane/envoy v1.32.3 github.com/envoyproxy/protoc-gen-validate v1.2.1 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.8.0 diff --git a/go.sum b/go.sum index db06f12ca9..678fb9d9a3 100644 --- a/go.sum +++ b/go.sum @@ -110,8 +110,8 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane/envoy v1.32.2 h1:zidqwmijfcbyKqVxjQDFx042PgX+p9U+/fu/f9VtSk8= -github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8= +github.com/envoyproxy/go-control-plane/envoy v1.32.3 h1:hVEaommgvzTjTd4xCaFd+kEQ2iYBtGxP6luyLrx6uOk= +github.com/envoyproxy/go-control-plane/envoy v1.32.3/go.mod h1:F6hWupPfh75TBXGKA++MCT/CZHFq5r9/uwt/kQYkZfE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= From a36c73c945d1fab2f4ec34f271bfc08f8d2caa87 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:34:26 +0100 Subject: [PATCH 1314/1397] chore(deps): bump github.com/KimMachineGun/automemlimit (#15891) Bumps [github.com/KimMachineGun/automemlimit](https://github.com/KimMachineGun/automemlimit) from 0.6.1 to 0.7.0. - [Release notes](https://github.com/KimMachineGun/automemlimit/releases) - [Commits](https://github.com/KimMachineGun/automemlimit/compare/v0.6.1...v0.7.0) --- updated-dependencies: - dependency-name: github.com/KimMachineGun/automemlimit dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 7 +------ go.sum | 15 ++------------- 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index 8569f86642..e232e85cfa 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 github.com/Code-Hex/go-generics-cache v1.5.1 - github.com/KimMachineGun/automemlimit v0.6.1 + github.com/KimMachineGun/automemlimit v0.7.0 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b github.com/aws/aws-sdk-go v1.55.6 @@ -108,9 +108,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cilium/ebpf v0.11.0 // indirect github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect - github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -133,7 +131,6 @@ 
require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.23.0 // indirect github.com/go-resty/resty/v2 v2.16.3 // indirect - github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/glog v1.2.3 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -177,14 +174,12 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/go.sum b/go.sum index 678fb9d9a3..bd81e8b557 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= -github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY= +github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88= 
+github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= @@ -68,16 +68,12 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= -github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -124,8 
+120,6 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -169,7 +163,6 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -407,8 +400,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= 
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -489,7 +480,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -644,7 +634,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 95fe7fc811e9c9ec8266b893f96a4b0afcada363 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:35:02 +0100 Subject: [PATCH 1315/1397] chore(deps): bump github.com/hetznercloud/hcloud-go/v2 (#15881) Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.17.1 to 2.18.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.17.1...v2.18.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e232e85cfa..166408cdca 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.31.0 github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec - github.com/hetznercloud/hcloud-go/v2 v2.17.1 + github.com/hetznercloud/hcloud-go/v2 v2.18.0 github.com/ionos-cloud/sdk-go/v6 v6.3.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 diff --git a/go.sum b/go.sum index bd81e8b557..cb7e08f34e 100644 --- a/go.sum +++ b/go.sum @@ -270,8 +270,8 @@ github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977Vrm github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.17.1 h1:DPi019dv0WCiECEmtcuTgc//hBvnxESb6QlJnAb4a04= 
-github.com/hetznercloud/hcloud-go/v2 v2.17.1/go.mod h1:6ygmBba+FdawR2lLp/d9uJljY2k0dTYthprrI8usdLw= +github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q= +github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/ionos-cloud/sdk-go/v6 v6.3.0 h1:/lTieTH9Mo/CWm3cTlFLnK10jgxjUGkAqRffGqvPteY= From ccdc108f09ab072adfd8a7d5d3009227c17ece19 Mon Sep 17 00:00:00 2001 From: SuperQ Date: Tue, 28 Jan 2025 17:38:45 +0100 Subject: [PATCH 1316/1397] Apply npm audit fix to react-app Apply package-lock.json changes from `npm audit fix` in order to reduce vulnerability reports. Signed-off-by: SuperQ --- web/ui/react-app/package-lock.json | 49 ++++++++++++++++-------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json index 2b4dcfea61..6f5eb5aff2 100644 --- a/web/ui/react-app/package-lock.json +++ b/web/ui/react-app/package-lock.json @@ -7892,9 +7892,9 @@ } }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "dependencies": { "path-key": "^3.1.0", @@ -10171,9 +10171,9 @@ } }, "node_modules/express": { - "version": "4.21.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.21.1.tgz", - "integrity": "sha512-YSFlK1Ee0/GC8QaO91tHcDxJiE/X4FbpAyQWkxAvG6AXCuR65YzK8ua6D9hvi/TzUfZMpc+BwuM1IPw8fmQBiQ==", + "version": 
"4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", "dev": true, "dependencies": { "accepts": "~1.3.8", @@ -10195,7 +10195,7 @@ "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.10", + "path-to-regexp": "0.1.12", "proxy-addr": "~2.0.7", "qs": "6.13.0", "range-parser": "~1.2.1", @@ -10210,6 +10210,10 @@ }, "engines": { "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/express/node_modules/debug": { @@ -10228,9 +10232,9 @@ "dev": true }, "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.10", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", - "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==", + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", "dev": true }, "node_modules/fast-deep-equal": { @@ -17101,9 +17105,9 @@ } }, "node_modules/path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz", + "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==", "dependencies": { "isarray": "0.0.1" } @@ -23614,11 +23618,10 @@ } }, "node_modules/webpack-dev-server/node_modules/http-proxy-middleware": { - "version": "2.0.6", - "resolved": 
"https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", - "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz", + "integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==", "dev": true, - "license": "MIT", "dependencies": { "@types/http-proxy": "^1.17.8", "http-proxy": "^1.18.1", @@ -23664,9 +23667,9 @@ } }, "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "dev": true, "engines": { "node": ">=10.0.0" @@ -24373,9 +24376,9 @@ } }, "node_modules/ws": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", - "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", "dev": true, "engines": { "node": ">=8.3.0" From 6c1162c03e2a05bb79ad583c685a4e7ffaf21b8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:54:41 +0100 Subject: [PATCH 1317/1397] chore(deps): bump google.golang.org/protobuf from 1.36.1 to 1.36.4 (#15874) Bumps google.golang.org/protobuf from 1.36.1 to 1.36.4. 
--- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 166408cdca..9492818f10 100644 --- a/go.mod +++ b/go.mod @@ -87,7 +87,7 @@ require ( google.golang.org/api v0.218.0 google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f google.golang.org/grpc v1.70.0 - google.golang.org/protobuf v1.36.3 + google.golang.org/protobuf v1.36.4 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.3 diff --git a/go.sum b/go.sum index cb7e08f34e..a649e84958 100644 --- a/go.sum +++ b/go.sum @@ -685,8 +685,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From bd55cdcf8f6764d2025842e8a87f0425547b3db9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> 
Date: Tue, 28 Jan 2025 18:55:14 +0100 Subject: [PATCH 1318/1397] chore(deps): bump github.com/ionos-cloud/sdk-go/v6 from 6.3.0 to 6.3.2 (#15879) Bumps [github.com/ionos-cloud/sdk-go/v6](https://github.com/ionos-cloud/sdk-go) from 6.3.0 to 6.3.2. - [Release notes](https://github.com/ionos-cloud/sdk-go/releases) - [Changelog](https://github.com/ionos-cloud/sdk-go/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/ionos-cloud/sdk-go/compare/v6.3.0...v6.3.2) --- updated-dependencies: - dependency-name: github.com/ionos-cloud/sdk-go/v6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9492818f10..a2fae4991f 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/hashicorp/consul/api v1.31.0 github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec github.com/hetznercloud/hcloud-go/v2 v2.18.0 - github.com/ionos-cloud/sdk-go/v6 v6.3.0 + github.com/ionos-cloud/sdk-go/v6 v6.3.2 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b diff --git a/go.sum b/go.sum index a649e84958..29422c6e70 100644 --- a/go.sum +++ b/go.sum @@ -274,8 +274,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOyd github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/ionos-cloud/sdk-go/v6 v6.3.0 h1:/lTieTH9Mo/CWm3cTlFLnK10jgxjUGkAqRffGqvPteY= -github.com/ionos-cloud/sdk-go/v6 v6.3.0/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= +github.com/ionos-cloud/sdk-go/v6 v6.3.2 
h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k= +github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= From 007d1115e1a4bbab27af93de1c1cf4448637da9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:55:39 +0100 Subject: [PATCH 1319/1397] chore(deps): bump golang.org/x/tools from 0.28.0 to 0.29.0 (#15876) Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.28.0 to 0.29.0. - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.28.0...v0.29.0) --- updated-dependencies: - dependency-name: golang.org/x/tools dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a2fae4991f..a45bcdee0f 100644 --- a/go.mod +++ b/go.mod @@ -83,7 +83,7 @@ require ( golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.0 golang.org/x/text v0.21.0 - golang.org/x/tools v0.28.0 + golang.org/x/tools v0.29.0 google.golang.org/api v0.218.0 google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f google.golang.org/grpc v1.70.0 diff --git a/go.sum b/go.sum index 29422c6e70..107ef83613 100644 --- a/go.sum +++ b/go.sum @@ -661,8 +661,8 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From e87e308f9d4a7e483f1f4cebc713d5de56334e6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 20:11:23 +0100 Subject: [PATCH 1320/1397] chore(deps): bump 
github.com/prometheus/alertmanager (#15878) Bumps [github.com/prometheus/alertmanager](https://github.com/prometheus/alertmanager) from 0.27.0 to 0.28.0. - [Release notes](https://github.com/prometheus/alertmanager/releases) - [Changelog](https://github.com/prometheus/alertmanager/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/alertmanager/compare/v0.27.0...v0.28.0) --- updated-dependencies: - dependency-name: github.com/prometheus/alertmanager dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 33 ++++++++++++++++++--------------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index a45bcdee0f..fbbeaf19e3 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/oklog/ulid v1.3.1 github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 github.com/ovh/go-ovh v1.6.0 - github.com/prometheus/alertmanager v0.27.0 + github.com/prometheus/alertmanager v0.28.0 github.com/prometheus/client_golang v1.21.0-rc.0 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.62.0 @@ -122,14 +122,14 @@ require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.22.2 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/errors v0.22.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/loads v0.21.5 // indirect - github.com/go-openapi/spec v0.20.14 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/swag 
v0.23.0 // indirect - github.com/go-openapi/validate v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-resty/resty/v2 v2.16.3 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/glog v1.2.3 // indirect diff --git a/go.sum b/go.sum index 107ef83613..1e70e07ac2 100644 --- a/go.sum +++ b/go.sum @@ -136,24 +136,24 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= -github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= -github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= -github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= -github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= 
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= -github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E= github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -240,6 +240,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= 
+github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -250,8 +252,8 @@ github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFO github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= -github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -264,8 +266,9 @@ github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/memberlist 
v0.5.1 h1:mk5dRuzeDNis2bi6LLoQIXfMH7JQvAzt3mQD0vNZZUo= +github.com/hashicorp/memberlist v0.5.1/go.mod h1:zGDXV6AqbDTKTM6yxW0I4+JtFzZAJVoIPvss4hV8F24= github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A= github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= @@ -422,8 +425,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= -github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= +github.com/prometheus/alertmanager v0.28.0 h1:sLN+6HhZet8hrbmGHLAHWsTXgZSVCvq9Ix3U3wvivqc= +github.com/prometheus/alertmanager v0.28.0/go.mod h1:/okSnb2LlodbMlRoOWQEKtqI/coOo2NKZDm2Hu9QHLQ= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= From 2581c7d057d64564477507f7118a518cca2457f9 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 29 Jan 2025 14:47:44 +0100 Subject: [PATCH 1321/1397] promql: fix rate calculation with a counter reset after the 1st histogram If a rate (or increase) is calculated on native histograms, and there is a counter reset between the 1st and 2nd histogram, we never have to touch the 1st histogram, so it doesn't even matter if it has an incompatible bucket layout. 
So we should not error out in that case. This simply nulls out the 1st histogram in that case. Signed-off-by: beorn7 --- promql/functions.go | 49 ++++++++++------ .../testdata/native_histograms.test | 57 +++++++++++++++---- 2 files changed, 77 insertions(+), 29 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 605661e5a0..ba5b32b737 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -187,35 +187,48 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // not a histogram, and a warning wrapped in an annotation in that case. // Otherwise, it returns the calculated histogram and an empty annotation. func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { - prev := points[0].H - usingCustomBuckets := prev.UsesCustomBuckets() - last := points[len(points)-1].H + var ( + prev = points[0].H + usingCustomBuckets = prev.UsesCustomBuckets() + last = points[len(points)-1].H + annos annotations.Annotations + ) + if last == nil { - return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) + return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } - minSchema := prev.Schema - if last.Schema < minSchema { - minSchema = last.Schema + // We check for gauge type histograms in the loop below, but the loop + // below does not run on the first and last point, so check the first + // and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } + + // Null out the 1st sample if there is a counter reset between the 1st + // and 2nd. In this case, we want to ignore any incompatibility in the + // bucket layout of the 1st sample because we do not need to look at it. 
+ if isCounter && len(points) > 1 { + second := points[1].H + if second != nil && second.DetectReset(prev) { + prev = &histogram.FloatHistogram{} + prev.Schema = second.Schema + prev.CustomValues = second.CustomValues + usingCustomBuckets = second.UsesCustomBuckets() + } } if last.UsesCustomBuckets() != usingCustomBuckets { - return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) - } - - var annos annotations.Annotations - - // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, - // so check the first and last point now. - if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { - annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) } // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? - // TODO(beorn7): Find a way to check that earlier, e.g. by handing in a - // []FloatPoint and a []HistogramPoint separately. 
+ minSchema := prev.Schema + if last.Schema < minSchema { + minSchema = last.Schema + } for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index f03b39a9f6..dd119c0617 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -1013,7 +1013,7 @@ eval instant at 5m sum(custom_buckets_histogram) clear -# Test 'this native histogram metric is not a gauge' warning for rate +# Test 'this native histogram metric is not a counter' warning for rate load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} @@ -1022,7 +1022,7 @@ eval_warn instant at 30s rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate -eval_warn instant at 1m rate(some_metric[1m]) +eval_warn instant at 1m rate(some_metric[1m30s]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} clear @@ -1032,20 +1032,20 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} # Start and end with exponential, with custom in the middle. -eval_warn instant at 1m rate(some_metric[1m]) +eval_warn instant at 1m rate(some_metric[1m30s]) # Should produce no results. # Start and end with custom, with exponential in the middle. -eval_warn instant at 1m30s rate(some_metric[1m]) +eval_warn instant at 1m30s rate(some_metric[1m30s]) # Should produce no results. -# Start with custom, end with exponential. 
-eval_warn instant at 1m rate(some_metric[1m]) - # Should produce no results. +# Start with custom, end with exponential. Return the exponential histogram divided by 30. +eval instant at 1m rate(some_metric[1m]) + {} {{schema:0 sum:0.16666666666666666 count:0.13333333333333333 buckets:[0.03333333333333333 0.06666666666666667 0.03333333333333333]}} -# Start with exponential, end with custom. -eval_warn instant at 30s rate(some_metric[1m]) - # Should produce no results. +# Start with exponential, end with custom. Return the custom buckets histogram divided by 30. +eval instant at 30s rate(some_metric[1m]) + {} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}} clear @@ -1179,7 +1179,10 @@ eval_info range from 0 to 6m step 6m metric2 > metric2 clear load 6m - nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# If evaluating at 12m, the first two NHCBs have the same custom values +# while the 3rd one has different ones. eval_warn instant at 12m sum_over_time(nhcb_metric[13m]) @@ -1206,6 +1209,38 @@ eval_warn instant at 12m rate(nhcb_metric[13m]) eval instant at 12m resets(nhcb_metric[13m]) {} 1 +# Now doing the same again, but at 18m, where the first NHCB has +# different custom_values compared to the other two. This now +# works with no warning for increase() and rate(). No change +# otherwise. 
+ +eval_warn instant at 18m sum_over_time(nhcb_metric[13m]) + +eval_warn instant at 18m avg_over_time(nhcb_metric[13m]) + +eval instant at 18m last_over_time(nhcb_metric[13m]) +nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval instant at 18m count_over_time(nhcb_metric[13m]) +{} 3 + +eval instant at 18m present_over_time(nhcb_metric[13m]) +{} 1 + +eval instant at 18m changes(nhcb_metric[13m]) +{} 1 + +eval_warn instant at 18m delta(nhcb_metric[13m]) + +eval instant at 18m increase(nhcb_metric[13m]) +{} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}} + +eval instant at 18m rate(nhcb_metric[13m]) +{} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}} + +eval instant at 18m resets(nhcb_metric[13m]) +{} 1 + clear load 1m From 9f6c1d9cd3043b838d7c9e6b754c0e503b5982e5 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 29 Jan 2025 15:45:20 +0100 Subject: [PATCH 1322/1397] promqltest: Small formatting improvement for native histograms A `HistogramTestExpression` was missed in one case, which made the formatting weird in case a histogram got unexpectedly returned. 
Signed-off-by: beorn7 --- promql/promqltest/test.go | 4 ++-- promql/promqltest/test_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 5e0d9083cb..e84eeebe6a 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -800,7 +800,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { fp := v.Metric.Hash() if _, ok := ev.metrics[fp]; !ok { if v.H != nil { - return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.H) + return fmt.Errorf("unexpected metric %s in result, has value %s", v.Metric, HistogramTestExpression(v.H)) } return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.F) @@ -838,7 +838,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { } exp0 := ev.expected[0].vals[0] if exp0.Histogram != nil { - return fmt.Errorf("expected histogram %v but got %s", exp0.Histogram.TestExpression(), val.String()) + return fmt.Errorf("expected histogram %s but got %s", exp0.Histogram.TestExpression(), val.String()) } if !almost.Equal(exp0.Value, val.V, defaultEpsilon) { return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V) diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 96499e869d..5122818438 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -247,7 +247,7 @@ load 5m eval instant at 0m testmetric `, - expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`, + expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {{}}`, }, "instant query, but result is missing a series": { input: testData + ` From cb3b17a14c162cce051ae944d06c2c1a742a9211 Mon Sep 17 00:00:00 2001 From: SuryaPrakash <119923001+suryaaprakassh@users.noreply.github.com> Date: Fri, 31 Jan 2025 20:02:20 +0530 Subject: [PATCH 
1323/1397] fix: os.MkdirTemp with t.TempDir (#15860) Signed-off-by: Surya Prakash --- tsdb/tsdbutil/dir_locker_testutil.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tsdb/tsdbutil/dir_locker_testutil.go b/tsdb/tsdbutil/dir_locker_testutil.go index 7228dbafed..5a335989c7 100644 --- a/tsdb/tsdbutil/dir_locker_testutil.go +++ b/tsdb/tsdbutil/dir_locker_testutil.go @@ -60,11 +60,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat for _, c := range cases { t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "test") - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, os.RemoveAll(tmpdir)) - }) + tmpdir := t.TempDir() // Test preconditions (file already exists + lockfile option) if c.fileAlreadyExists { @@ -82,7 +78,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat // Check that the lockfile is always deleted if !c.lockFileDisabled { - _, err = os.Stat(locker.path) + _, err := os.Stat(locker.path) require.True(t, os.IsNotExist(err), "lockfile was not deleted") } }) From 130cd024e0f3faec1631e121c6305219eb874252 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Sat, 1 Feb 2025 03:41:03 +0530 Subject: [PATCH 1324/1397] [FEATURE] PromQL: Implements `idelta` and `irate` with histograms (#15853) Add native histogram support to idelta and irate functions --------- Signed-off-by: Neeraj Gartia --- promql/functions.go | 103 ++++++++++++++++++---- promql/promqltest/testdata/functions.test | 56 ++++++++++++ 2 files changed, 142 insertions(+), 17 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 605661e5a0..938eefdf00 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -286,46 +286,115 @@ func funcIncrease(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === irate(node parser.ValueTypeMatrix) (Vector, Annotations) === func 
funcIrate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, enh.Out, true) + return instantValue(vals, args, enh.Out, true) } // === idelta(node model.ValMatrix) (Vector, Annotations) === func funcIdelta(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return instantValue(vals, enh.Out, false) + return instantValue(vals, args, enh.Out, false) } -func instantValue(vals []parser.Value, out Vector, isRate bool) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] +func instantValue(vals []parser.Value, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) { + var ( + samples = vals[0].(Matrix)[0] + metricName = samples.Metric.Get(labels.MetricName) + ss = make([]Sample, 0, 2) + annos annotations.Annotations + ) + // No sense in trying to compute a rate without at least two points. Drop // this Vector element. // TODO: add RangeTooShortWarning - if len(samples.Floats) < 2 { + if len(samples.Floats)+len(samples.Histograms) < 2 { return out, nil } - lastSample := samples.Floats[len(samples.Floats)-1] - previousSample := samples.Floats[len(samples.Floats)-2] - - var resultValue float64 - if isRate && lastSample.F < previousSample.F { - // Counter reset. - resultValue = lastSample.F - } else { - resultValue = lastSample.F - previousSample.F + // Add the last 2 float samples if they exist. + for i := max(0, len(samples.Floats)-2); i < len(samples.Floats); i++ { + ss = append(ss, Sample{ + F: samples.Floats[i].F, + T: samples.Floats[i].T, + }) } - sampledInterval := lastSample.T - previousSample.T + // Add the last 2 histogram samples into their correct position if they exist. 
+ for i := max(0, len(samples.Histograms)-2); i < len(samples.Histograms); i++ { + s := Sample{ + H: samples.Histograms[i].H, + T: samples.Histograms[i].T, + } + switch { + case len(ss) == 0: + ss = append(ss, s) + case len(ss) == 1: + if s.T < ss[0].T { + ss = append([]Sample{s}, ss...) + } else { + ss = append(ss, s) + } + case s.T < ss[0].T: + // s is older than 1st, so discard it. + case s.T > ss[1].T: + // s is newest, so add it as 2nd and make the old 2nd the new 1st. + ss[0] = ss[1] + ss[1] = s + default: + // In all other cases, we just make s the new 1st. + // This establishes a correct order, even in the (irregular) + // case of equal timestamps. + ss[0] = s + } + } + + resultSample := ss[1] + sampledInterval := ss[1].T - ss[0].T if sampledInterval == 0 { // Avoid dividing by 0. return out, nil } + switch { + case ss[1].H == nil && ss[0].H == nil: + if !isRate || ss[1].F >= ss[0].F { + // Gauge or counter without reset. + resultSample.F = ss[1].F - ss[0].F + } + // In case of a counter reset, we leave resultSample at + // its current value, which is already ss[1]. + case ss[1].H != nil && ss[0].H != nil: + resultSample.H = ss[1].H.Copy() + // irate should only be applied to counters. + if isRate && (ss[1].H.CounterResetHint == histogram.GaugeType || ss[0].H.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, args.PositionRange())) + } + // idelta should only be applied to gauges. 
+ if !isRate && (ss[1].H.CounterResetHint != histogram.GaugeType || ss[0].H.CounterResetHint != histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, args.PositionRange())) + } + if !isRate || !ss[1].H.DetectReset(ss[0].H) { + _, err := resultSample.H.Sub(ss[0].H) + if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { + return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args.PositionRange())) + } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return out, annos.Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, args.PositionRange())) + } + } + resultSample.H.CounterResetHint = histogram.GaugeType + resultSample.H.Compact(0) + default: + // Mix of a float and a histogram. + return out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args.PositionRange())) + } if isRate { // Convert to per-second. - resultValue /= float64(sampledInterval) / 1000 + if resultSample.H == nil { + resultSample.F /= float64(sampledInterval) / 1000 + } else { + resultSample.H.Div(float64(sampledInterval) / 1000) + } } - return append(out, Sample{F: resultValue}), nil + return append(out, resultSample), annos } // Calculate the trend value at the given index i in raw data d. 
diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 9bcb7e6945..32838afe73 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -218,6 +218,13 @@ clear load 5m http_requests_total{path="/foo"} 0+10x10 http_requests_total{path="/bar"} 0+10x5 0+10x5 + http_requests_histogram{path="/a"} {{sum:2 count:2}}+{{sum:3 count:3}}x5 + http_requests_histogram{path="/b"} 0 0 {{sum:1 count:1}} {{sum:4 count:4}} + http_requests_histogram{path="/c"} 0 0 {{sum:1 count:1}} {{sum:4 count:4 counter_reset_hint:gauge}} + http_requests_histogram{path="/d"} 0 0 {{sum:1 count:1 counter_reset_hint:gauge}} {{sum:4 count:4}} + http_requests_histogram{path="/e"} 0 1 2 {{sum:4 count:4}} + http_requests_histogram{path="/f"} 0 0 {{sum:1 count:1}} {{schema:-53 sum:3 count:3 custom_values:[5 10] buckets:[3]}} + http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:3 count:3 custom_values:[1] buckets:[3]}} {{schema:-53 sum:3 count:3 custom_values:[5 10] buckets:[3]}} eval instant at 50m irate(http_requests_total[50m]) {path="/foo"} .03333333333333333333 @@ -228,6 +235,28 @@ eval instant at 30m irate(http_requests_total[50m]) {path="/foo"} .03333333333333333333 {path="/bar"} 0 +eval instant at 20m irate(http_requests_histogram{path="/a"}[20m]) + {path="/a"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} + +eval instant at 20m irate(http_requests_histogram{path="/b"}[20m]) + {path="/b"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} + +eval instant at 20m irate(http_requests_histogram{path="/b"}[6m]) + +eval_warn instant at 20m irate(http_requests_histogram{path="/c"}[20m]) + {path="/c"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} + +eval_warn instant at 20m irate(http_requests_histogram{path="/d"}[20m]) + {path="/d"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}} + +eval_warn instant at 20m irate(http_requests_histogram{path="/e"}[20m]) + +eval instant at 20m 
irate(http_requests_histogram{path="/f"}[20m]) + {path="/f"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}} + +eval instant at 20m irate(http_requests_histogram{path="/g"}[20m]) + {path="/g"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}} + clear # Tests for delta(). @@ -259,11 +288,38 @@ clear load 5m http_requests{path="/foo"} 0 50 100 150 http_requests{path="/bar"} 0 50 100 50 + http_requests_histogram{path="/a"} {{sum:2 count:2 counter_reset_hint:gauge}}+{{sum:1 count:3 counter_reset_hint:gauge}}x5 + http_requests_histogram{path="/b"} 0 0 {{sum:1 count:1 counter_reset_hint:gauge}} {{sum:2 count:2 counter_reset_hint:gauge}} + http_requests_histogram{path="/c"} 0 0 {{sum:1 count:1}} {{sum:2 count:2 counter_reset_hint:gauge}} + http_requests_histogram{path="/d"} 0 0 {{sum:1 count:1 counter_reset_hint:gauge}} {{sum:2 count:2}} + http_requests_histogram{path="/e"} 0 1 2 {{sum:1 count:2 counter_reset_hint:gauge}} + http_requests_histogram{path="/f"} 0 0 {{sum:1 count:1 counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} + http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:1 count:1 custom_values:[1] buckets:[2] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} eval instant at 20m idelta(http_requests[20m]) {path="/foo"} 50 {path="/bar"} -50 +eval instant at 20m idelta(http_requests_histogram{path="/a"}[20m]) + {path="/a"} {{sum:1 count:3 counter_reset_hint:gauge}} + +eval instant at 20m idelta(http_requests_histogram{path="/b"}[20m]) + {path="/b"} {{sum:1 count:1 counter_reset_hint:gauge}} + +eval instant at 20m idelta(http_requests_histogram{path="/b"}[6m]) + +eval_warn instant at 20m idelta(http_requests_histogram{path="/c"}[20m]) + {path="/c"} {{sum:1 count:1 counter_reset_hint:gauge}} + +eval_warn instant at 20m idelta(http_requests_histogram{path="/d"}[20m]) + {path="/d"} {{sum:1 
count:1 counter_reset_hint:gauge}} + +eval_warn instant at 20m idelta(http_requests_histogram{path="/e"}[20m]) + +eval_warn instant at 20m idelta(http_requests_histogram{path="/f"}[20m]) + +eval_warn instant at 20m idelta(http_requests_histogram{path="/g"}[20m]) + clear # Tests for deriv() and predict_linear(). From 8be416a67cea1663b90e360463fb2091cb0ffe9a Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Sun, 2 Feb 2025 03:27:29 +0530 Subject: [PATCH 1325/1397] [FIX] PromQL: Updates annotation for bin op between incompatible histograms (#15895) PromQL: Updates annotation for bin op between incompatible histograms --------- Signed-off-by: Neeraj Gartia --- promql/engine.go | 9 ++++----- util/annotations/annotations.go | 12 ++++++++++++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index cf66928201..44985e50f9 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3475,15 +3475,14 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat if err == nil { return nil } - metricName := "" + op := parser.ItemTypeStr[e.Op] pos := e.PositionRange() if errors.Is(err, annotations.PromQLInfo) || errors.Is(err, annotations.PromQLWarning) { return annotations.New().Add(err) } - if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { - return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) - } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { - return annotations.New().Add(annotations.NewIncompatibleCustomBucketsHistogramsWarning(metricName, pos)) + // TODO(NeerajGartia21): Test the exact annotation output once the testing framework can do so. 
+ if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) || errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { + return annotations.New().Add(annotations.NewIncompatibleBucketLayoutInBinOpWarning(op, pos)) } return nil } diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 5b2fde152b..95783957a7 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -143,6 +143,7 @@ var ( NativeHistogramNotGaugeWarning = fmt.Errorf("%w: this native histogram metric is not a gauge:", PromQLWarning) MixedExponentialCustomHistogramsWarning = fmt.Errorf("%w: vector contains a mix of histograms with exponential and custom buckets schemas for metric name", PromQLWarning) IncompatibleCustomBucketsHistogramsWarning = fmt.Errorf("%w: vector contains histograms with incompatible custom buckets for metric name", PromQLWarning) + IncompatibleBucketLayoutInBinOpWarning = fmt.Errorf("%w: incompatible bucket layout encountered for binary operator", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) @@ -295,9 +296,20 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit } } +// NewHistogramIgnoredInMixedRangeInfo is used when a histogram is ignored +// in a range vector which contains mix of floats and histograms. func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName), } } + +// NewIncompatibleBucketLayoutInBinOpWarning is used if binary operators act on a +// combination of two incompatible histograms. 
+func NewIncompatibleBucketLayoutInBinOpWarning(operator string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %s", IncompatibleBucketLayoutInBinOpWarning, operator), + } +} From 7427753922df24d11bf79436a17b2b0ec6aa3630 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 3 Feb 2025 15:46:39 +0100 Subject: [PATCH 1326/1397] scrape: Add realistic data case for scrape loop append bench. (#15966) * scrape: Add realistic data case for scrape loop append bench. Signed-off-by: bwplotka * Update scrape/scrape_test.go Co-authored-by: George Krajcsovits Signed-off-by: Bartlomiej Plotka --------- Signed-off-by: bwplotka Signed-off-by: Bartlomiej Plotka Co-authored-by: George Krajcsovits --- model/textparse/benchmark_test.go | 3 +- scrape/scrape_test.go | 132 +++++++++++++++++++++--------- 2 files changed, 95 insertions(+), 40 deletions(-) diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index bd0d5089ac..c0492e3c99 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -65,6 +65,7 @@ var newTestParserFns = map[string]newParser{ // // NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest" // as the testdata has different amount and type of metrics and features (e.g. exemplars). +// Use scrape.BenchmarkScrapeLoopAppend for this purpose. func BenchmarkParse(b *testing.B) { for _, bcase := range []struct { dataFile string // Localized to "./testdata". @@ -76,7 +77,7 @@ func BenchmarkParse(b *testing.B) { {dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, {dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, - // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. 
+ // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. {dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"}, // We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703. diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 26117ffd08..02bd01e0d3 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -25,6 +25,8 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" + "sort" "strconv" "strings" "sync" @@ -1386,8 +1388,17 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { require.Equal(t, 1, seriesAdded) } -func makeTestMetrics(n int) []byte { - // Construct a metrics string to parse +func readTextParseTestMetrics(t testing.TB) []byte { + t.Helper() + + b, err := os.ReadFile("../model/textparse/testdata/promtestdata.txt") + if err != nil { + t.Fatal(err) + } + return b +} + +func makeTestGauges(n int) []byte { sb := bytes.Buffer{} fmt.Fprintf(&sb, "# TYPE metric_a gauge\n") fmt.Fprintf(&sb, "# HELP metric_a help text\n") @@ -1401,59 +1412,102 @@ func makeTestMetrics(n int) []byte { func promTextToProto(tb testing.TB, text []byte) []byte { tb.Helper() - d := expfmt.NewDecoder(bytes.NewReader(text), expfmt.TextVersion) - - pb := &dto.MetricFamily{} - if err := d.Decode(pb); err != nil { - tb.Fatal(err) - } - o, err := proto.Marshal(pb) + var p expfmt.TextParser + fams, err := p.TextToMetricFamilies(bytes.NewReader(text)) if err != nil { tb.Fatal(err) } + // Order by name for the deterministic tests. + var names []string + for n := range fams { + names = append(names, n) + } + sort.Strings(names) + buf := bytes.Buffer{} - // Write first length, then binary protobuf. 
- varintBuf := binary.AppendUvarint(nil, uint64(len(o))) - buf.Write(varintBuf) - buf.Write(o) + for _, n := range names { + o, err := proto.Marshal(fams[n]) + if err != nil { + tb.Fatal(err) + } + + // Write first length, then binary protobuf. + varintBuf := binary.AppendUvarint(nil, uint64(len(o))) + buf.Write(varintBuf) + buf.Write(o) + } return buf.Bytes() } +func TestPromTextToProto(t *testing.T) { + metricsText := readTextParseTestMetrics(t) + // TODO(bwplotka): Windows adds \r for new lines which is + // not handled correctly in the expfmt parser, fix it. + metricsText = bytes.ReplaceAll(metricsText, []byte("\r"), nil) + + metricsProto := promTextToProto(t, metricsText) + d := expfmt.NewDecoder(bytes.NewReader(metricsProto), expfmt.NewFormat(expfmt.TypeProtoDelim)) + + var got []string + for { + mf := &dto.MetricFamily{} + if err := d.Decode(mf); err != nil { + if errors.Is(err, io.EOF) { + break + } + t.Fatal(err) + } + got = append(got, mf.GetName()) + } + require.Len(t, got, 59) + // Check a few to see if those are not dups. + require.Equal(t, "go_gc_duration_seconds", got[0]) + require.Equal(t, "prometheus_evaluator_duration_seconds", got[32]) + require.Equal(t, "prometheus_treecache_zookeeper_failures_total", got[58]) +} + /* - export bench=scrape-loop-v1 && go test \ + export bench=append-v1 && go test \ -run '^$' -bench '^BenchmarkScrapeLoopAppend' \ -benchtime 5s -count 6 -cpu 2 -timeout 999m \ | tee ${bench}.txt */ func BenchmarkScrapeLoopAppend(b *testing.B) { - metricsText := makeTestMetrics(100) - - // Create proto representation. 
- metricsProto := promTextToProto(b, metricsText) - - for _, bcase := range []struct { - name string - contentType string - parsable []byte + for _, data := range []struct { + name string + parsableText []byte }{ - {name: "PromText", contentType: "text/plain", parsable: metricsText}, - {name: "OMText", contentType: "application/openmetrics-text", parsable: metricsText}, - {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto}, + {name: "1Fam1000Gauges", parsableText: makeTestGauges(1000)}, // ~33.8 KB, ~38.8 KB in proto + {name: "59FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~33.3 KB, ~13.2 KB in proto. } { - b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) { - ctx, sl := simpleTestScrapeLoop(b) + b.Run(fmt.Sprintf("data=%v", data.name), func(b *testing.B) { + metricsProto := promTextToProto(b, data.parsableText) - slApp := sl.appender(ctx) - ts := time.Time{} + for _, bcase := range []struct { + name string + contentType string + parsable []byte + }{ + {name: "PromText", contentType: "text/plain", parsable: data.parsableText}, + {name: "OMText", contentType: "application/openmetrics-text", parsable: data.parsableText}, + {name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto}, + } { + b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) { + ctx, sl := simpleTestScrapeLoop(b) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts = ts.Add(time.Second) - _, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts) - if err != nil { - b.Fatal(err) - } + slApp := sl.appender(ctx) + ts := time.Time{} + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts = ts.Add(time.Second) + _, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts) + if err != nil { + b.Fatal(err) + } + } + }) } }) } @@ -4474,7 +4528,7 @@ func TestScrapeLoopCompression(t *testing.T) { simpleStorage := teststorage.New(t) defer 
simpleStorage.Close() - metricsText := makeTestMetrics(10) + metricsText := makeTestGauges(10) for _, tc := range []struct { enableCompression bool From 23b8dfb357a7abbb47e50aebc9841a09cd2db939 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 07:21:32 +0000 Subject: [PATCH 1327/1397] chore(deps): bump github.com/prometheus/common (#15963) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.61.0 to 0.62.0. - [Release notes](https://github.com/prometheus/common/releases) - [Changelog](https://github.com/prometheus/common/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/common/compare/v0.61.0...v0.62.0) --- updated-dependencies: - dependency-name: github.com/prometheus/common dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- documentation/examples/remote_storage/go.mod | 4 ++-- documentation/examples/remote_storage/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index da8ef55332..541a8b382d 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,7 +8,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb-client-go/v2 v2.14.0 github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.61.0 + github.com/prometheus/common v0.62.0 github.com/prometheus/prometheus v1.99.0 github.com/stretchr/testify v1.10.0 ) @@ -66,7 +66,7 @@ require ( golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.1 
// indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apimachinery v0.29.3 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 53077c877a..bea6b8e68b 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -273,8 +273,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= -github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -421,8 +421,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 
h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From c7a245000b26e31c34e41acc8f6aa218ffb30817 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Tue, 4 Feb 2025 10:29:22 +0100 Subject: [PATCH 1328/1397] Prepare release 3.2.0-rc.0 (#15901) Signed-off-by: Jan Fajerski --- CHANGELOG.md | 16 ++++++++++++++++ VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 4 ++-- web/ui/package.json | 2 +- 7 files changed, 25 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40be59f724..3cd3f684f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,23 @@ ## unreleased +## 3.2.0 RC0 / 2025-01-29 + +* [CHANGE] relabel: Replace actions can now use UTF-8 characters in `targetLabel` field. Note that `$` or `${}` will be expanded. This also apply to `replacement` field for `LabelMap` action. #15851 +* [CHANGE] rulefmt: Rule names can use UTF-8 characters, except `{` and `}` characters (due to common mistake checks). #15851 +* [FEATURE] remote/otlp: Add feature flag `otlp-deltatocumulative` to support conversion from delta to cumulative. #15165 +* [ENHANCEMENT] openstack SD: Discover Octavia loadbalancers. #15539 +* [ENHANCEMENT] scrape: Add metadata for automatic metrics to WAL for `metadata-wal-records` feature. #15837 * [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719 +* [ENHANCEMENT] promtool: Add --ignore-unknown-fields option. 
#15706 +* [ENHANCEMENT] ui: Make "hide empty rules" and "hide empty pools" persistent #15807 +* [ENHANCEMENT] web/api: Add a limit parameter to `/query` and `/query_range`. #15552 +* [ENHANCEMENT] api: Add fields Node and ServerTime to `/status`. #15784 +* [BUGFIX] remotewrite2: Fix invalid metadata bug for metrics without metadata. #15829 +* [BUGFIX] remotewrite2: Fix the unit field propagation. #15825 +* [BUGFIX] scrape: Fix WAL metadata for histograms and summaries. #15832 +* [BUGFIX] ui: Merge duplicate "Alerts page settings" sections. #15810 +* [BUGFIX] PromQL: Fix `<aggr_over_time>` functions with histograms. #15711 ## 3.1.0 / 2025-01-02 diff --git a/VERSION b/VERSION index fd2a01863f..e11e207900 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.1.0 +3.2.0-rc.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 54546c76ad..cdf7b848a8 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.301.0", + "version": "0.302.0-rc.0", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.301.0", + "@prometheus-io/codemirror-promql": "0.302.0-rc.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 270fa16b29..02c2e5e14d 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.301.0", + "version": "0.302.0-rc.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": 
"https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.301.0", + "@prometheus-io/lezer-promql": "0.302.0-rc.0", "lru-cache": "^11.0.2" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 270e57797f..d95dad37cb 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.301.0", + "version": "0.302.0-rc.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index dd251c660e..715ddb8260 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.301.0", + "version": "0.302.0-rc.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.301.0", + "version": "0.302.0-rc.0", "workspaces": [ "mantine-ui", "module/*" diff --git a/web/ui/package.json b/web/ui/package.json index 8539057eeb..fd70da548f 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.301.0", + "version": "0.302.0-rc.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", From 96f31e370b2ac59c323faf8f6d7149a92bf139a0 Mon Sep 17 00:00:00 2001 From: asymmetric <101816+asymmetric@users.noreply.github.com> Date: Tue, 4 Feb 2025 12:34:59 +0100 Subject: [PATCH 1329/1397] doc: clarify `rate` values are averaged (#14045) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * doc: clarify `rate` values are averaged Signed-off-by: asymmetric <101816+asymmetric@users.noreply.github.com> Co-authored-by: Björn Rabenstein --- docs/querying/functions.md | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index facdfce154..29a89b18e6 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -666,7 +666,7 @@ resets due to target restarts) are automatically adjusted for. Also, the calculation extrapolates to the ends of the time range, allowing for missed scrapes or imperfect alignment of scrape cycles with the range's time period. -The following example expression returns the per-second rate of HTTP requests as measured +The following example expression returns the per-second average rate of HTTP requests over the last 5 minutes, per time series in the range vector: ``` From d80c58cade61a580aaae74e71baf09bc76c45a7a Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Tue, 4 Feb 2025 13:18:08 +0100 Subject: [PATCH 1330/1397] prepare 3.2.0-rc.1 (#15968) Signed-off-by: Jan Fajerski --- CHANGELOG.md | 2 +- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 4 ++-- web/ui/package.json | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cd3f684f8..4334a17137 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## unreleased -## 3.2.0 RC0 / 2025-01-29 +## 3.2.0-rc.1 / 2025-01-29 * [CHANGE] relabel: Replace actions can now use UTF-8 characters in `targetLabel` field. Note that `$` or `${}` will be expanded. This also apply to `replacement` field for `LabelMap` action. #15851 * [CHANGE] rulefmt: Rule names can use UTF-8 characters, except `{` and `}` characters (due to common mistake checks). 
#15851 diff --git a/VERSION b/VERSION index e11e207900..f338b6234b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.2.0-rc.0 +3.2.0-rc.1 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index cdf7b848a8..1bc742de81 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.302.0-rc.0", + "version": "0.302.0-rc.1", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.302.0-rc.0", + "@prometheus-io/codemirror-promql": "0.302.0-rc.1", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 02c2e5e14d..a0f9f823c1 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.302.0-rc.0", + "version": "0.302.0-rc.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.302.0-rc.0", + "@prometheus-io/lezer-promql": "0.302.0-rc.1", "lru-cache": "^11.0.2" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index d95dad37cb..f41f56652b 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.302.0-rc.0", + "version": "0.302.0-rc.1", "description": "lezer-based PromQL grammar", 
"main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 715ddb8260..c27ca7997e 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.302.0-rc.0", + "version": "0.302.0-rc.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.302.0-rc.0", + "version": "0.302.0-rc.1", "workspaces": [ "mantine-ui", "module/*" diff --git a/web/ui/package.json b/web/ui/package.json index fd70da548f..fb26970ccf 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.302.0-rc.0", + "version": "0.302.0-rc.1", "private": true, "scripts": { "build": "bash build_ui.sh --all", From 026d0198d53376b2539995c6919aafbbe452dfb5 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 5 Feb 2025 11:40:54 +1100 Subject: [PATCH 1331/1397] Add unit tests Signed-off-by: Charles Korn --- promql/engine_test.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index 9ba93abc78..fcadfa1062 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -1900,6 +1900,15 @@ func TestSubquerySelector(t *testing.T) { }, Start: time.Unix(35, 0), }, + { + Query: "metric[0:10s]", + Result: promql.Result{ + nil, + promql.Matrix{}, + nil, + }, + Start: time.Unix(10, 0), + }, }, }, { @@ -3251,6 +3260,11 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { }, }, }, + "matches series but range is 0": { + expr: "some_metric[0]", + ts: baseT.Add(2 * time.Minute), + expected: promql.Matrix{}, + }, } for name, testCase := range testCases { From 2fc6ba1c942dc06c2882beffd680375cecfe9a4e Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 5 Feb 2025 11:45:16 +1100 Subject: [PATCH 1332/1397] Expand `TestInstantQueryWithRangeVectorSelector` to include histograms 
Signed-off-by: Charles Korn --- promql/engine_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/promql/engine_test.go b/promql/engine_test.go index fcadfa1062..5c4bb744f5 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -3208,6 +3208,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { load 1m some_metric{env="1"} 0+1x4 some_metric{env="2"} 0+2x4 + some_metric{env="3"} {{count:0}}+{{count:1}}x4 some_metric_with_stale_marker 0 1 stale 3 `) t.Cleanup(func() { require.NoError(t, storage.Close()) }) @@ -3235,6 +3236,13 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4}, }, }, + { + Metric: labels.FromStrings("__name__", "some_metric", "env", "3"), + Histograms: []promql.HPoint{ + {T: timestamp.FromTime(baseT.Add(time.Minute)), H: &histogram.FloatHistogram{Count: 1, CounterResetHint: histogram.NotCounterReset}}, + {T: timestamp.FromTime(baseT.Add(2 * time.Minute)), H: &histogram.FloatHistogram{Count: 2, CounterResetHint: histogram.NotCounterReset}}, + }, + }, }, }, "matches no series": { From 69ce0c24db1679341db88cbe89ec720b8cf527b2 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 5 Feb 2025 11:45:30 +1100 Subject: [PATCH 1333/1397] Fix issue Signed-off-by: Charles Korn --- promql/engine.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index 44985e50f9..3e14a2529d 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -2357,6 +2357,11 @@ func (ev *evaluator) matrixIterSlice( } } + if mint == maxt { + // Empty range: return the empty slices. + return floats, histograms + } + soughtValueType := it.Seek(maxt) if soughtValueType == chunkenc.ValNone { if it.Err() != nil { From e4037b3ec3654ad9016105e3da80f034a1ad2f5a Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 5 Feb 2025 22:24:13 +1100 Subject: [PATCH 1334/1397] Allow UTF-8 labels in label_replace This makes it consistent with label_join. 
Signed-off-by: Joshua Hesketh --- promql/functions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/functions.go b/promql/functions.go index 938eefdf00..c2f292c3a6 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1612,7 +1612,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } - if !model.LabelNameRE.MatchString(dst) { + if !model.LabelName(dst).IsValid() { panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } From 5be31977016eb780da53c419c97b53cb52232338 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 5 Feb 2025 23:08:24 +1100 Subject: [PATCH 1335/1397] Update the test to invalid utf-8 "invalid-label-name" is valid in utf-8 mode Signed-off-by: Joshua Hesketh --- promql/promqltest/testdata/functions.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 32838afe73..b0241d7ed9 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -466,7 +466,7 @@ eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*") eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*") # label_replace fails when the destination label name is not a valid Prometheus label name. -eval_fail instant at 0m label_replace(testmetric, "invalid-label-name", "", "src", "(.*)") +eval_fail instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)") # label_replace fails when there would be duplicated identical output label sets. 
eval_fail instant at 0m label_replace(testmetric, "src", "", "", "") From 5a5fdea7ad9307fae83e1289e2f396d6f75d0f79 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Thu, 6 Feb 2025 02:40:23 +1100 Subject: [PATCH 1336/1397] Fix duplicate output vector if delayed name removal is disabled (#15975) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix duplicate output vector if delayed name removal is disabled This error is emitted in cleanupMetricLabels, but is skipped if enableDelayedNameRemoval is false. This makes it consistent with label_replace Signed-off-by: Joshua Hesketh --------- Signed-off-by: Joshua Hesketh Signed-off-by: Björn Rabenstein Co-authored-by: Björn Rabenstein --- promql/functions.go | 3 +++ promql/promqltest/testdata/functions.test | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/promql/functions.go b/promql/functions.go index 938eefdf00..d8b22ddd40 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1689,6 +1689,9 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) matrix[i].DropName = el.DropName } } + if matrix.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } return matrix, ws } diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 32838afe73..68cf5a09b4 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -499,6 +499,8 @@ eval instant at 20s timestamp(metric) load 5m testmetric{src="a",src1="b",src2="c",dst="original-destination-value"} 0 testmetric{src="d",src1="e",src2="f",dst="original-destination-value"} 1 + dup{label="a", this="a"} 1.0 + dup{label="b", this="a"} 1.0 # label_join joins all src values in order. 
eval instant at 0m label_join(testmetric, "dst", "-", "src", "src1", "src2") @@ -530,6 +532,9 @@ eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2") testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0 testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1 +eval_fail instant at 0m label_join(dup, "label", "", "this") + expected_fail_message vector cannot contain metrics with the same labelset + clear # Tests for vector. From 20371118b6333312991833dc832eeb09fc37fa33 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Thu, 6 Feb 2025 20:32:03 +0530 Subject: [PATCH 1337/1397] [FIX] PromQL: Ignore histograms in `scalar`, `sort` and `sort_desc` functions (#15964) PromQL: Ignore histograms in scalar, sort and sort_desc functions --------- Signed-off-by: Neeraj Gartia --- promql/functions.go | 55 +++++++++++++---------- promql/promqltest/testdata/functions.test | 20 +++++++++ 2 files changed, 52 insertions(+), 23 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 7ac17167f4..1cb8b2af2d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -486,11 +486,22 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions return append(enh.Out, Sample{F: s1}), nil } +// filterFloats filters out histogram samples from the vector in-place. +func filterFloats(v Vector) Vector { + floats := v[:0] + for _, s := range v { + if s.H == nil { + floats = append(floats, s) + } + } + return floats +} + // === sort(node parser.ValueTypeVector) (Vector, Annotations) === func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. 
- byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) + byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector))) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } @@ -499,7 +510,7 @@ func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. - byValueSorter := vectorByValueHeap(vals[0].(Vector)) + byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector))) sort.Sort(sort.Reverse(byValueSorter)) return Vector(byValueSorter), nil } @@ -631,11 +642,27 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === Scalar(node parser.ValueTypeVector) Scalar === func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - v := vals[0].(Vector) - if len(v) != 1 { + var ( + v = vals[0].(Vector) + value float64 + found bool + ) + + for _, s := range v { + if s.H == nil { + if found { + // More than one float found, return NaN. + return append(enh.Out, Sample{F: math.NaN()}), nil + } + found = true + value = s.F + } + } + // Return the single float if found, otherwise return NaN. + if !found { return append(enh.Out, Sample{F: math.NaN()}), nil } - return append(enh.Out, Sample{F: v[0].F}), nil + return append(enh.Out, Sample{F: value}), nil } func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { @@ -1896,16 +1923,7 @@ func (s vectorByValueHeap) Len() int { } func (s vectorByValueHeap) Less(i, j int) bool { - // We compare histograms based on their sum of observations. - // TODO(beorn7): Is that what we want? 
vi, vj := s[i].F, s[j].F - if s[i].H != nil { - vi = s[i].H.Sum - } - if s[j].H != nil { - vj = s[j].H.Sum - } - if math.IsNaN(vi) { return true } @@ -1935,16 +1953,7 @@ func (s vectorByReverseValueHeap) Len() int { } func (s vectorByReverseValueHeap) Less(i, j int) bool { - // We compare histograms based on their sum of observations. - // TODO(beorn7): Is that what we want? vi, vj := s[i].F, s[j].F - if s[i].H != nil { - vi = s[i].H.Sum - } - if s[j].H != nil { - vj = s[j].H.Sum - } - if math.IsNaN(vi) { return true } diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 23aaa1fe59..49ab6c50ff 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -642,6 +642,7 @@ load 5m http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests{job="app-server", instance="2", group="canary"} {{schema:0 sum:1 count:1}}x15 eval_ordered instant at 50m sort(http_requests) http_requests{group="production", instance="0", job="api-server"} 100 @@ -1652,3 +1653,22 @@ load 1m eval range from 0 to 5m step 1m round(mixed_metric) {} _ 1 2 3 + +# Test scalar() with histograms. +load 1m + metric{type="float", l="x"} 1 + metric{type="float", l="y"} 2 + metric{type="histogram", l="x"} {{schema:0 sum:1 count:1}} + metric{type="histogram", l="x"} {{schema:0 sum:1 count:1}} + +# Two floats in the vector. +eval instant at 0m scalar(metric) + NaN + +# No floats in the vector. +eval instant at 0m scalar({type="histogram"}) + NaN + +# One float in the vector. 
+eval instant at 0m scalar({l="x"}) + 1 From 686dcc7b0d78920553de5181689464cbc3e57dee Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Fri, 7 Feb 2025 16:46:55 +0100 Subject: [PATCH 1338/1397] headIndexReader: reduce debug logging (#15993) Around Mimir compactions we see logging in ShardedPostings do massive allocations and drive GC up to 50% of CPU. Signed-off-by: Dimitar Dimitrov --- tsdb/head_read.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index b95257c28a..438c7e6a4d 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -117,15 +117,19 @@ func (h *headIndexReader) PostingsForAllLabelValues(ctx context.Context, name st func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { series := make([]*memSeries, 0, 128) + notFoundSeriesCount := 0 // Fetch all the series only once. for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - h.head.logger.Debug("Looked up series not found") + notFoundSeriesCount++ } else { series = append(series, s) } } + if notFoundSeriesCount > 0 { + h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount) + } if err := p.Err(); err != nil { return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } @@ -150,11 +154,12 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou } out := make([]storage.SeriesRef, 0, 128) + notFoundSeriesCount := 0 for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - h.head.logger.Debug("Looked up series not found") + notFoundSeriesCount++ continue } @@ -165,6 +170,9 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou out = append(out, storage.SeriesRef(s.ref)) } + if notFoundSeriesCount > 0 { + h.head.logger.Debug("Looked up series not found", "count", notFoundSeriesCount) + } return index.NewListPostings(out) } From 
151d7e6c2845dffe0c1e0bcbe8771e1ee9127943 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:11:47 +0100 Subject: [PATCH 1339/1397] chore(deps): bump bufbuild/buf-setup-action from 1.48.0 to 1.50.0 (#15962) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.48.0 to 1.50.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/1115d0acd3d2a120b30023fac52abc46807c8fd6...a47c93e0b1648d5651a065437926377d060baa99) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 31e4d7ee2a..d7adc93cf6 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0 + - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index b72b927bb0..3384b4aa2f 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: 
bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0 + - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From f453f4317885fa36a7faaf0d91d779f80cb406d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:12:03 +0100 Subject: [PATCH 1340/1397] chore(deps): bump actions/upload-artifact from 4.5.0 to 4.6.0 (#15961) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.5.0 to 4.6.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/6f51ac03b9356f520e9adb1b1b7802705f340c2b...65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/fuzzing.yml | 2 +- .github/workflows/scorecards.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 9f5ef96296..cc3b57d85d 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 1415fead8e..67cfc0cb44 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # tag=v4.5.0 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # tag=v4.6.0 with: name: SARIF file path: results.sarif From 92873bbfe8e23a7e2975cc37756c434bc9a50856 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:12:24 +0100 Subject: [PATCH 1341/1397] chore(deps): bump github/codeql-action from 3.28.0 to 3.28.8 (#15960) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.0 to 3.28.8. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/48ab28a6f5dbc2a99bf1e0131198dd8f1df78169...dd746615b3b9d728a6a37ca2045b68ca76d4841a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9a522fb6c9..7e2a0af270 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/init@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/autobuild@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 + uses: github/codeql-action/analyze@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 67cfc0cb44..85d467a37b 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # tag=v3.28.0 + uses: github/codeql-action/upload-sarif@dd746615b3b9d728a6a37ca2045b68ca76d4841a # tag=v3.28.8 with: sarif_file: results.sarif From bc07189662813a8654f8b6200bc05d05bb346ae5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:12:51 +0100 Subject: [PATCH 1342/1397] chore(deps): bump actions/stale from 9.0.0 to 9.1.0 (#15959) Bumps [actions/stale](https://github.com/actions/stale) from 9.0.0 to 9.1.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/28ca1036281a5e5922ead5184a1bbf96e5fc984e...5bef64f19d7facfb25b37b414482c7164d639639) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d71bcbc9d8..371d92a69a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -11,7 +11,7 @@ jobs: if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. 
runs-on: ubuntu-latest steps: - - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} # opt out of defaults to avoid marking issues as stale and closing them From 6db18abe4e41703d4e79d2357e439035df75a872 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:13:23 +0100 Subject: [PATCH 1343/1397] chore(deps): bump golangci/golangci-lint-action from 6.1.1 to 6.2.0 (#15958) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.1 to 6.2.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/971e284b6050e8a5849b72094c50ab08da042db8...ec5d18412c0aeab7936cb16880d708ba2a64e1ae) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3bf12b07c4..e456734ff4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -191,7 +191,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. 
From 665d6058289d242d852cae5e6cab91ed4ead2aed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:14:06 +0100 Subject: [PATCH 1344/1397] chore(deps): bump golangci/golangci-lint-action in /scripts (#15955) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.1 to 6.2.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/971e284b6050e8a5849b72094c50ab08da042db8...ec5d18412c0aeab7936cb16880d708ba2a64e1ae) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 0c00c410a4..f557fbd589 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -33,7 +33,7 @@ jobs: run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: args: --verbose version: v1.63.4 From 1dc7ed2e9267260218e4991073327153b5f20f22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:14:11 +0100 Subject: [PATCH 1345/1397] chore(deps): bump actions/setup-go from 5.2.0 to 5.3.0 in /scripts (#15956) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.2.0 to 5.3.0. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/3041bf56c941b39c61721a86cd11f3bb1338122a...f111f3307d8850f501ac008e886eec1fd1932a34) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index f557fbd589..def9007ac6 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies From 47f3ddbb347517aba1bff8f9683b83acc22cd2da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:15:52 +0100 Subject: [PATCH 1346/1397] chore(deps): bump github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor (#15924) Bumps [github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib) from 0.116.0 to 0.118.0. 
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.116.0...v0.118.0) --- updated-dependencies: - dependency-name: github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index fbbeaf19e3..aed2703d57 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.118.0 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.28.0 github.com/prometheus/client_golang v1.21.0-rc.0 @@ -170,8 +170,8 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect 
github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect diff --git a/go.sum b/go.sum index 1e70e07ac2..baafd778cd 100644 --- a/go.sum +++ b/go.sum @@ -391,14 +391,14 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.118.0 h1:PbknCwTbeTz8GNSfN4fOIp50YCDO19s1IAp6PGFcdpA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.118.0/go.mod h1:cOY+YDFtxJH3eQzJDObvWFFSIvD2AstG5MZ9t8wqusQ= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 h1:DSoYrOjLv23HXpx72hl61br4ZZTj6dqtwZSGoypKWIA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0/go.mod h1:nR+r7aAbsktscJk4fGmzljblbZBMaiZcIWeKbXV+HmY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 h1:aUTSkzJExtrlHN32g8hX/cRNEo2ZmucPg+vwPqOYvhg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0/go.mod h1:a3sewj4nEozMwcNwZTHPzddS+1BnA6BaAkO/CRIGHVU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.118.0 h1:RZszYLp7sVMOD1rppjY+fP2PQh5qNAh5U6RoQNvd4Rg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.118.0/go.mod h1:5i928mwS+Ojv41l3/IxcyK1SCy6WnpL3wjLWKDb4YKQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -511,8 +511,8 @@ go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWS go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w= -go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= 
go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= From 0ef0d92fdba70a66ee8b9012e3c970fcd9b3db88 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:16:24 +0100 Subject: [PATCH 1347/1397] chore(deps): bump github.com/docker/docker (#15923) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.4.1+incompatible to 27.5.1+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v27.4.1...v27.5.1) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index aed2703d57..3583845766 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 github.com/digitalocean/godo v1.132.0 - github.com/docker/docker v27.4.1+incompatible + github.com/docker/docker v27.5.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane/envoy v1.32.3 github.com/envoyproxy/protoc-gen-validate v1.2.1 diff --git a/go.sum b/go.sum index baafd778cd..a4c03b1c48 100644 --- a/go.sum +++ b/go.sum @@ -93,8 +93,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= 
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= -github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= From 0e69b20098336897430af0e0046c97c734981b02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:16:49 +0100 Subject: [PATCH 1348/1397] chore(deps): bump github.com/hetznercloud/hcloud-go/v2 (#15921) Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.18.0 to 2.19.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.18.0...v2.19.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3583845766..7070fa80d4 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.31.0 github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec - github.com/hetznercloud/hcloud-go/v2 v2.18.0 + github.com/hetznercloud/hcloud-go/v2 v2.19.0 github.com/ionos-cloud/sdk-go/v6 v6.3.2 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 diff --git a/go.sum b/go.sum index a4c03b1c48..91d3b157fb 100644 --- a/go.sum +++ b/go.sum @@ -273,8 +273,8 @@ github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977Vrm github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.18.0 h1:BemrVGeWI8Kn/pvaC1jBsHZxQMnRqOydS7Ju4BERB4Q= -github.com/hetznercloud/hcloud-go/v2 v2.18.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA= +github.com/hetznercloud/hcloud-go/v2 v2.19.0 h1:crqbWMywudvlPLLczFf2hBpTPIATjrWMmwfiKSTpUt0= +github.com/hetznercloud/hcloud-go/v2 v2.19.0/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k= From 37092fdad78c8c7e989137f7a9f319839aa0af4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:16:54 +0100 
Subject: [PATCH 1349/1397] chore(deps): bump github.com/prometheus/sigv4 from 0.1.1 to 0.1.2 (#15922) Bumps [github.com/prometheus/sigv4](https://github.com/prometheus/sigv4) from 0.1.1 to 0.1.2. - [Release notes](https://github.com/prometheus/sigv4/releases) - [Changelog](https://github.com/prometheus/sigv4/blob/main/RELEASE.md) - [Commits](https://github.com/prometheus/sigv4/compare/v0.1.1...v0.1.2) --- updated-dependencies: - dependency-name: github.com/prometheus/sigv4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7070fa80d4..8ca8bd225e 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,7 @@ require ( github.com/prometheus/common v0.62.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/exporter-toolkit v0.13.2 - github.com/prometheus/sigv4 v0.1.1 + github.com/prometheus/sigv4 v0.1.2 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 diff --git a/go.sum b/go.sum index 91d3b157fb..f52cdb3c86 100644 --- a/go.sum +++ b/go.sum @@ -451,8 +451,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q= -github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8= +github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8= +github.com/prometheus/sigv4 v0.1.2/go.mod 
h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= From 249c67dd19811fa4e01dfc293a147d0fe481531c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:17:30 +0100 Subject: [PATCH 1350/1397] chore(deps): bump github.com/linode/linodego from 1.46.0 to 1.47.0 (#15918) Bumps [github.com/linode/linodego](https://github.com/linode/linodego) from 1.46.0 to 1.47.0. - [Release notes](https://github.com/linode/linodego/releases) - [Commits](https://github.com/linode/linodego/compare/v1.46.0...v1.47.0) --- updated-dependencies: - dependency-name: github.com/linode/linodego dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8ca8bd225e..b6a3ed8f74 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.46.0 + github.com/linode/linodego v1.47.0 github.com/miekg/dns v1.1.63 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f diff --git a/go.sum b/go.sum index f52cdb3c86..1b935c36de 100644 --- a/go.sum +++ b/go.sum @@ -321,8 +321,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.46.0 h1:+uOG4SD2MIrhbrLrvOD5HrbdLN3D19Wgn3MgdUNQjeU= -github.com/linode/linodego v1.46.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk= +github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U= +github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= From 994767eb944e9bce4e32960b4682526da86c0b9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:17:37 +0100 Subject: [PATCH 1351/1397] chore(deps): bump google.golang.org/api from 0.218.0 to 0.219.0 (#15919) Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.218.0 to 0.219.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.218.0...v0.219.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b6a3ed8f74..4ee326ab58 100644 --- a/go.mod +++ b/go.mod @@ -84,7 +84,7 @@ require ( golang.org/x/sys v0.29.0 golang.org/x/text v0.21.0 golang.org/x/tools v0.29.0 - google.golang.org/api v0.218.0 + google.golang.org/api v0.219.0 google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f google.golang.org/grpc v1.70.0 google.golang.org/protobuf v1.36.4 @@ -196,7 +196,7 @@ require ( golang.org/x/net v0.34.0 // indirect golang.org/x/term v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 1b935c36de..53edac83f3 100644 --- a/go.sum +++ b/go.sum @@ -670,8 +670,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.218.0 h1:x6JCjEWeZ9PFCRe9z0FBrNwj7pB7DOAqT35N+IPnAUA= -google.golang.org/api v0.218.0/go.mod h1:5VGHBAkxrA/8EFjLVEYmMUJ8/8+gWWQ3s4cFH0FxG2M= +google.golang.org/api v0.219.0 h1:nnKIvxKs/06jWawp2liznTBnMRQBEPpGo7I+oEypTX0= +google.golang.org/api v0.219.0/go.mod h1:K6OmjGm+NtLrIkHxv1U3a0qIf/0JOvAHd5O/6AoyKYE= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -679,8 +679,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47 h1:91mG8dNTpkC0uChJUQ9zCiRqx3GEEFOWaRZ0mI6Oj2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From 9c5fd0b9fba5aec0ebca2e47e502f2f1e5acba27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 19:10:17 +0000 Subject: [PATCH 1352/1397] chore(deps): bump actions/setup-go from 5.2.0 to 5.3.0 (#15957) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.2.0 to 5.3.0. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/3041bf56c941b39c61721a86cd11f3bb1338122a...f111f3307d8850f501ac008e886eec1fd1932a34) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e456734ff4..57c47720c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -80,7 +80,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: 1.23.x - run: | @@ -171,7 +171,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: cache: false go-version: 1.23.x @@ -184,7 +184,7 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies From 78055a593294323eca70eec4218ca04fb979985d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Feb 2025 21:40:03 +0100 Subject: [PATCH 1353/1397] chore(deps): bump github.com/digitalocean/godo 
from 1.132.0 to 1.136.0 (#15920) Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.132.0 to 1.136.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.132.0...v1.136.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4ee326ab58..7509c5e2ee 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.132.0 + github.com/digitalocean/godo v1.136.0 github.com/docker/docker v27.5.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane/envoy v1.32.3 diff --git a/go.sum b/go.sum index 53edac83f3..5cf3e3ab67 100644 --- a/go.sum +++ b/go.sum @@ -87,8 +87,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ= -github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.136.0 h1:DTxugljFJSMBPfEGq4KeXpnKeAHicggNqogcrw/YdZw= +github.com/digitalocean/godo v1.136.0/go.mod 
h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= From 993d4e71c944908830b14e1e0e950b3633cbe88a Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 12 Dec 2024 23:54:15 +0100 Subject: [PATCH 1354/1397] docs: Update PromQL documentation to match the native histogram spec This is also meant to document the actual implementation, but see #13934 for the current state. This also improves and streamlines some parts of the documentation that are not strictly native histogram related, but are colocated with them. In particular, the section about aggregation operators got restructured quite a bit, including the removal of a quite verbose example for `limit_ratio` (which was just too long an this location and also a bit questionabl in its usefulness). Signed-off-by: beorn7 --- docs/querying/basics.md | 14 +- docs/querying/functions.md | 488 ++++++++++++++++++++----------------- docs/querying/operators.md | 216 +++++++++------- 3 files changed, 400 insertions(+), 318 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index c1c76e7e04..8fc86bc230 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -47,7 +47,19 @@ _Notes about the experimental native histograms:_ disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) but complete histograms (histogram samples). A vector may contain a mix of - float samples and histogram samples. + float samples and histogram samples. Note that the term “histogram sample” in + the PromQL documentation always refers to a native histogram. Classic + histograms are broken up into a number of series of float samples. 
From the + perspective of PromQL, there are no “classic histogram samples”. +* Like float samples, histogram samples can be counters or gauges, also called + counter histograms or gauge histograms, respectively. +* Native histograms can have different bucket layouts, but they are generally + convertible to compatible versions to apply binary and aggregation operations + to them. This is not true for all bucketing schemas. If incompatible + histograms are encountered in an operation, the corresponding output vector + element is removed from the result, flagged with a warn-level annotation. + More details can be found in the + [native histogram specification](https://prometheus.io/docs/specs/native_histograms/#compatibility-between-histograms). ## Literals diff --git a/docs/querying/functions.md b/docs/querying/functions.md index facdfce154..767c45b439 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -11,30 +11,17 @@ instant-vector)`. This means that there is one argument `v` which is an instant vector, which if not provided it will default to the value of the expression `vector(time())`. -_Notes about the experimental native histograms:_ - -* Ingesting native histograms has to be enabled via a [feature - flag](../feature_flags.md#native-histograms). As long as no native histograms - have been ingested into the TSDB, all functions will behave as usual. -* Functions that do not explicitly mention native histograms in their - documentation (see below) will ignore histogram samples. -* Functions that do already act on native histograms might still change their - behavior in the future. -* If a function requires the same bucket layout between multiple native - histograms it acts on, it will automatically convert them - appropriately. (With the currently supported bucket schemas, that's always - possible.) - ## `abs()` -`abs(v instant-vector)` returns the input vector with all sample values converted to -their absolute value. 
+`abs(v instant-vector)` returns a vector containing all float samples in the +input vector converted to their absolute value. Histogram samples in the input +vector are ignored silently. ## `absent()` `absent(v instant-vector)` returns an empty vector if the vector passed to it -has any elements (floats or native histograms) and a 1-element vector with the -value 1 if the vector passed to it has no elements. +has any elements (float samples or histogram samples) and a 1-element vector +with the value 1 if the vector passed to it has no elements. This is useful for alerting on when no time series exist for a given metric name and label combination. @@ -56,8 +43,9 @@ of the 1-element output vector from the input vector. ## `absent_over_time()` `absent_over_time(v range-vector)` returns an empty vector if the range vector -passed to it has any elements (floats or native histograms) and a 1-element -vector with the value 1 if the range vector passed to it has no elements. +passed to it has any elements (float samples or histogram samples) and a +1-element vector with the value 1 if the range vector passed to it has no +elements. This is useful for alerting on when no time series exist for a given metric name and label combination for a certain amount of time. @@ -78,8 +66,9 @@ labels of the 1-element output vector from the input vector. ## `ceil()` -`ceil(v instant-vector)` rounds the sample values of all elements in `v` up to -the nearest integer value greater than or equal to v. +`ceil(v instant-vector)` returns a vector containing all float samples in the +input vector rounded up to the nearest integer value greater than or equal to +their original value. Histogram samples in the input vector are ignored silently. * `ceil(+Inf) = +Inf` * `ceil(±0) = ±0` @@ -90,49 +79,62 @@ the nearest integer value greater than or equal to v. 
For each input time series, `changes(v range-vector)` returns the number of times its value has changed within the provided time range as an instant -vector. +vector. A float sample followed by a histogram sample, or vice versa, counts as +a change. A counter histogram sample followed by a gauge histogram sample with +otherwise exactly the same values, or vice versa, does not count as a change. ## `clamp()` -`clamp(v instant-vector, min scalar, max scalar)` -clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`. +`clamp(v instant-vector, min scalar, max scalar)` clamps the values of all +float samples in `v` to have a lower limit of `min` and an upper limit of +`max`. Histogram samples in the input vector are ignored silently. Special cases: * Return an empty vector if `min > max` -* Return `NaN` if `min` or `max` is `NaN` +* Float samples are clamped to `NaN` if `min` or `max` is `NaN` ## `clamp_max()` -`clamp_max(v instant-vector, max scalar)` clamps the sample values of all -elements in `v` to have an upper limit of `max`. +`clamp_max(v instant-vector, max scalar)` clamps the values of all float +samples in `v` to have an upper limit of `max`. Histogram samples in the input +vector are ignored silently. ## `clamp_min()` -`clamp_min(v instant-vector, min scalar)` clamps the sample values of all -elements in `v` to have a lower limit of `min`. +`clamp_min(v instant-vector, min scalar)` clamps the values of all float +samples in `v` to have a lower limit of `min`. Histogram samples in the input +vector are ignored silently. ## `day_of_month()` -`day_of_month(v=vector(time()) instant-vector)` returns the day of the month -for each of the given times in UTC. Returned values are from 1 to 31. +`day_of_month(v=vector(time()) instant-vector)` interpretes float samples in +`v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the +day of the month (in UTC) for each of those timestamps. 
Returned values are +from 1 to 31. Histogram samples in the input vector are ignored silently. ## `day_of_week()` -`day_of_week(v=vector(time()) instant-vector)` returns the day of the week for -each of the given times in UTC. Returned values are from 0 to 6, where 0 means -Sunday etc. +`day_of_week(v=vector(time()) instant-vector)` interpretes float samples in `v` +as timestamps (number of seconds since January 1, 1970 UTC) and returns the day +of the week (in UTC) for each of those timestamps. Returned values are from 0 +to 6, where 0 means Sunday etc. Histogram samples in the input vector are +ignored silently. -## `day_of_year()` +## `day_of_year()` -`day_of_year(v=vector(time()) instant-vector)` returns the day of the year for -each of the given times in UTC. Returned values are from 1 to 365 for non-leap years, -and 1 to 366 in leap years. +`day_of_year(v=vector(time()) instant-vector)` interpretes float samples in `v` +as timestamps (number of seconds since January 1, 1970 UTC) and returns the day +of the year (in UTC) for each of those timestamps. Returned values are from 1 +to 365 for non-leap years, and 1 to 366 in leap years. Histogram samples in the +input vector are ignored silently. ## `days_in_month()` -`days_in_month(v=vector(time()) instant-vector)` returns number of days in the -month for each of the given times in UTC. Returned values are from 28 to 31. +`days_in_month(v=vector(time()) instant-vector)` interpretes float samples in +`v` as timestamps (number of seconds since January 1, 1970 UTC) and returns the +number of days in the month of each of those timestamps (in UTC). Returned +values are from 28 to 31. Histogram samples in the input vector are ignored silently. 
## `delta()` @@ -150,36 +152,67 @@ between now and 2 hours ago: delta(cpu_temp_celsius{host="zeus"}[2h]) ``` -`delta` acts on native histograms by calculating a new histogram where each +`delta` acts on histogram samples by calculating a new histogram where each component (sum and count of observations, buckets) is the difference between -the respective component in the first and last native histogram in -`v`. However, each element in `v` that contains a mix of float and native -histogram samples within the range, will be missing from the result vector. +the respective component in the first and last native histogram in `v`. +However, each element in `v` that contains a mix of float samples and histogram +samples within the range will be omitted from the result vector, flagged by a +warn-level annotation. -`delta` should only be used with gauges and native histograms where the -components behave like gauges (so-called gauge histograms). +`delta` should only be used with gauges (for both floats and histograms). ## `deriv()` -`deriv(v range-vector)` calculates the per-second derivative of the time series in a range -vector `v`, using [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression). -The range vector must have at least two samples in order to perform the calculation. When `+Inf` or -`-Inf` are found in the range vector, the slope and offset value calculated will be `NaN`. +`deriv(v range-vector)` calculates the per-second derivative of each float time +series in the range vector `v`, using [simple linear +regression](https://en.wikipedia.org/wiki/Simple_linear_regression). The range +vector must have at least two float samples in order to perform the +calculation. When `+Inf` or `-Inf` are found in the range vector, the slope and +offset value calculated will be `NaN`. -`deriv` should only be used with gauges. +`deriv` should only be used with gauges and only works for float samples. 
+Elements in the range vector that contain only histogram samples are ignored +entirely. For elements that contain a mix of float and histogram samples, only +the float samples are used as input, which is flagged by an info-level +annotation. + +## `double_exponential_smoothing()` + +**This function has to be enabled via the [feature +flag](../feature_flags.md#experimental-promql-functions) +`--enable-feature=promql-experimental-functions`.** + +`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a +smoothed value for each float time series in the range in `v`. The lower the +smoothing factor `sf`, the more importance is given to old data. The higher the +trend factor `tf`, the more trends in the data is considered. Both `sf` and +`tf` must be between 0 and 1. For additional details, refer to [NIST +Engineering Statistics +Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). In +Prometheus V2 this function was called `holt_winters`. This caused confusion +since the Holt-Winters method usually refers to triple exponential smoothing. +Double exponential smoothing as implemented here is also referred to as "Holt +Linear". + +`double_exponential_smoothing` should only be used with gauges and only works +for float samples. Elements in the range vector that contain only histogram +samples are ignored entirely. For elements that contain a mix of float and +histogram samples, only the float samples are used as input, which is flagged +by an info-level annotation. ## `exp()` -`exp(v instant-vector)` calculates the exponential function for all elements in `v`. -Special cases are: +`exp(v instant-vector)` calculates the exponential function for all float +samples in `v`. Histogram samples are ignored silently. Special cases are: * `Exp(+Inf) = +Inf` * `Exp(NaN) = NaN` ## `floor()` -`floor(v instant-vector)` rounds the sample values of all elements in `v` down -to the nearest integer value smaller than or equal to v. 
+`floor(v instant-vector)` returns a vector containing all float samples in the +input vector rounded down to the nearest integer value smaller than or equal +to their original value. Histogram samples in the input vector are ignored silently. * `floor(+Inf) = +Inf` * `floor(±0) = ±0` @@ -188,12 +221,8 @@ to the nearest integer value smaller than or equal to v. ## `histogram_avg()` -_This function only acts on native histograms, which are an experimental -feature. The behavior of this function may change in future versions of -Prometheus, including its removal from PromQL._ - -`histogram_avg(v instant-vector)` returns the arithmetic average of observed values stored in -a native histogram. Samples that are not native histograms are ignored and do +`histogram_avg(v instant-vector)` returns the arithmetic average of observed +values stored in each histogram sample in `v`. Float samples are ignored and do not show up in the returned vector. Use `histogram_avg` as demonstrated below to compute the average request duration @@ -209,32 +238,25 @@ Which is equivalent to the following query: ## `histogram_count()` and `histogram_sum()` -_Both functions only act on native histograms, which are an experimental -feature. The behavior of these functions may change in future versions of -Prometheus, including their removal from PromQL._ - `histogram_count(v instant-vector)` returns the count of observations stored in -a native histogram. Samples that are not native histograms are ignored and do -not show up in the returned vector. +each histogram sample in `v`. Float samples are ignored and do not show up in +the returned vector. Similarly, `histogram_sum(v instant-vector)` returns the sum of observations -stored in a native histogram. +stored in each histogram sample. 
Use `histogram_count` in the following way to calculate a rate of observations -(in this case corresponding to “requests per second”) from a native histogram: +(in this case corresponding to “requests per second”) from a series of +histogram samples: histogram_count(rate(http_request_duration_seconds[10m])) ## `histogram_fraction()` -_This function only acts on native histograms, which are an experimental -feature. The behavior of this function may change in future versions of -Prometheus, including its removal from PromQL._ - -For a native histogram, `histogram_fraction(lower scalar, upper scalar, v -instant-vector)` returns the estimated fraction of observations between the -provided lower and upper values. Samples that are not native histograms are -ignored and do not show up in the returned vector. +`histogram_fraction(lower scalar, upper scalar, v instant-vector)` returns the +estimated fraction of observations between the provided lower and upper values +for each histogram sample in `v`. Float samples are ignored and do not show up +in the returned vector. For example, the following expression calculates the fraction of HTTP requests over the last hour that took 200ms or less: @@ -253,12 +275,13 @@ observations less than or equal 0.2 would be `-Inf` rather than `0`. Whether the provided boundaries are inclusive or exclusive is only relevant if the provided boundaries are precisely aligned with bucket boundaries in the underlying native histogram. In this case, the behavior depends on the schema -definition of the histogram. The currently supported schemas all feature -inclusive upper boundaries and exclusive lower boundaries for positive values -(and vice versa for negative values). Without a precise alignment of -boundaries, the function uses linear interpolation to estimate the -fraction. With the resulting uncertainty, it becomes irrelevant if the -boundaries are inclusive or exclusive. +definition of the histogram. 
(The usual standard exponential schemas all +feature inclusive upper boundaries and exclusive lower boundaries for positive +values, and vice versa for negative values.) Without a precise alignment of +boundaries, the function uses interpolation to estimate the fraction. With the +resulting uncertainty, it becomes irrelevant if the boundaries are inclusive or +exclusive. The interpolation method is the same as the one used for +`histogram_quantile()`. See there for more details. ## `histogram_quantile()` @@ -270,10 +293,6 @@ summaries](https://prometheus.io/docs/practices/histograms) for a detailed explanation of φ-quantiles and the usage of the (classic) histogram metric type in general.) -_Note that native histograms are an experimental feature. The behavior of this -function when dealing with native histograms may change in future versions of -Prometheus._ - The float samples in `b` are considered the counts of observations in each bucket of one or more classic histograms. Each float sample must have a label `le` where the label value denotes the inclusive upper bound of the bucket. @@ -284,8 +303,8 @@ type](https://prometheus.io/docs/concepts/metric_types/#histogram) automatically provides time series with the `_bucket` suffix and the appropriate labels. -The native histogram samples in `b` are treated each individually as a separate -histogram to calculate the quantile from. +The (native) histogram samples in `b` are treated each individually as a +separate histogram to calculate the quantile from. As long as no naming collisions arise, `b` may contain a mix of classic and native histograms. @@ -336,7 +355,9 @@ non-zero-buckets of native histograms with a standard exponential bucketing schema, the interpolation is done under the assumption that the samples within the bucket are distributed in a way that they would uniformly populate the buckets in a hypothetical histogram with higher resolution. (This is also -called _exponential interpolation_.) 
+called _exponential interpolation_. See the [native histogram +specification](https://prometheus.io/docs/specs/native_histograms/#interpolation-within-a-bucket) +for more details.) If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned. @@ -387,63 +408,46 @@ difference between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are non-monotonic bucket counts even after this adjustment, they are increased to the value of the previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input data -and is therefore flagged with an informational annotation reading `input to +and is therefore flagged by an info-level annotation reading `input to histogram_quantile needed to be fixed for monotonicity`. If you encounter this annotation, you should find and remove the source of the invalid data. ## `histogram_stddev()` and `histogram_stdvar()` -_Both functions only act on native histograms, which are an experimental -feature. The behavior of these functions may change in future versions of -Prometheus, including their removal from PromQL._ - `histogram_stddev(v instant-vector)` returns the estimated standard deviation -of observations in a native histogram, based on the geometric mean of the buckets -where the observations lie. Samples that are not native histograms are ignored and -do not show up in the returned vector. +of observations for each histogram sample in `v`, based on the geometric mean +of the buckets where the observations lie. Float samples are ignored and do not +show up in the returned vector. Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard -variance of observations in a native histogram. 
- -## `double_exponential_smoothing()` - -**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** - -`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value -for time series based on the range in `v`. The lower the smoothing factor `sf`, -the more importance is given to old data. The higher the trend factor `tf`, the -more trends in the data is considered. Both `sf` and `tf` must be between 0 and -1. -For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm). -In Prometheus V2 this function was called `holt_winters`. This caused confusion -since the Holt-Winters method usually refers to triple exponential smoothing. -Double exponential smoothing as implemented here is also referred to as "Holt -Linear". - -`double_exponential_smoothing` should only be used with gauges. +variance of observations for each histogram sample in `v`. ## `hour()` -`hour(v=vector(time()) instant-vector)` returns the hour of the day -for each of the given times in UTC. Returned values are from 0 to 23. +`hour(v=vector(time()) instant-vector)` interpretes float samples in `v` as +timestamps (number of seconds since January 1, 1970 UTC) and returns the hour +of the day (in UTC) for each of those timestamps. Returned values are from 0 +to 23. Histogram samples in the input vector are ignored silently. ## `idelta()` `idelta(v range-vector)` calculates the difference between the last two samples in the range vector `v`, returning an instant vector with the given deltas and -equivalent labels. +equivalent labels. Both samples must be either float samples or histogram +samples. Elements in `v` where one of the last two samples is a float sample +and the other is a histogram sample will be omitted from the result vector, +flagged by a warn-level annotation. 
-`idelta` should only be used with gauges. +`idelta` should only be used with gauges (for both floats and histograms). ## `increase()` -`increase(v range-vector)` calculates the increase in the -time series in the range vector. Breaks in monotonicity (such as counter -resets due to target restarts) are automatically adjusted for. The -increase is extrapolated to cover the full time range as specified -in the range vector selector, so that it is possible to get a -non-integer result even if a counter increases only by integer -increments. +`increase(v range-vector)` calculates the increase in the time series in the +range vector. Breaks in monotonicity (such as counter resets due to target +restarts) are automatically adjusted for. The increase is extrapolated to cover +the full time range as specified in the range vector selector, so that it is +possible to get a non-integer result even if a counter increases only by +integer increments. The following example expression returns the number of HTTP requests as measured over the last 5 minutes, per time series in the range vector: @@ -452,19 +456,20 @@ over the last 5 minutes, per time series in the range vector: increase(http_requests_total{job="api-server"}[5m]) ``` -`increase` acts on native histograms by calculating a new histogram where each -component (sum and count of observations, buckets) is the increase between -the respective component in the first and last native histogram in -`v`. However, each element in `v` that contains a mix of float and native -histogram samples within the range, will be missing from the result vector. +`increase` acts on histogram samples by calculating a new histogram where each +component (sum and count of observations, buckets) is the increase between the +respective component in the first and last native histogram in `v`. 
However, +each element in `v` that contains a mix of float samples and histogram samples +within the range, will be omitted from the result vector, flagged by a +warn-level annotation. -`increase` should only be used with counters and native histograms where the -components behave like counters. It is syntactic sugar for `rate(v)` multiplied -by the number of seconds under the specified time range window, and should be -used primarily for human readability. Use `rate` in recording rules so that -increases are tracked consistently on a per-second basis. +`increase` should only be used with counters (for both floats and histograms). +It is syntactic sugar for `rate(v)` multiplied by the number of seconds under +the specified time range window, and should be used primarily for human +readability. Use `rate` in recording rules so that increases are tracked +consistently on a per-second basis. -## `info()` (experimental) +## `info()` _The `info` function is an experiment to improve UX around including labels from [info metrics](https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/#info-metrics). @@ -560,7 +565,12 @@ consider all matching info series and with their appropriate identifying labels. `irate(v range-vector)` calculates the per-second instant rate of increase of the time series in the range vector. This is based on the last two data points. Breaks in monotonicity (such as counter resets due to target restarts) are -automatically adjusted for. +automatically adjusted for. Both samples must be either float samples or +histogram samples. Elements in `v` where one of the last two samples is a float +sample and the other is a histogram sample will be omitted from the result +vector, flagged by a warn-level annotation. + +`irate` should only be used with counters (for both floats and histograms). 
The following example expression returns the per-second rate of HTTP requests looking up to 5 minutes back for the two most recent data points, per time @@ -618,8 +628,8 @@ label_replace(up{job="api-server",service="a:c"}, "foo", "$name", "service", "(? ## `ln()` -`ln(v instant-vector)` calculates the natural logarithm for all elements in `v`. -Special cases are: +`ln(v instant-vector)` calculates the natural logarithm for all float samples +in `v`. Histogram samples in the input vector are ignored silently. Special cases are: * `ln(+Inf) = +Inf` * `ln(0) = -Inf` @@ -628,35 +638,45 @@ Special cases are: ## `log2()` -`log2(v instant-vector)` calculates the binary logarithm for all elements in `v`. -The special cases are equivalent to those in `ln`. +`log2(v instant-vector)` calculates the binary logarithm for all float samples +in `v`. Histogram samples in the input vector are ignored silently. The special cases +are equivalent to those in `ln`. ## `log10()` -`log10(v instant-vector)` calculates the decimal logarithm for all elements in `v`. -The special cases are equivalent to those in `ln`. +`log10(v instant-vector)` calculates the decimal logarithm for all float +samples in `v`. Histogram samples in the input vector are ignored silently. The special +cases are equivalent to those in `ln`. ## `minute()` -`minute(v=vector(time()) instant-vector)` returns the minute of the hour for each -of the given times in UTC. Returned values are from 0 to 59. +`minute(v=vector(time()) instant-vector)` interpretes float samples in `v` as +timestamps (number of seconds since January 1, 1970 UTC) and returns the minute +of the hour (in UTC) for each of those timestamps. Returned values are from 0 +to 59. Histogram samples in the input vector are ignored silently. ## `month()` -`month(v=vector(time()) instant-vector)` returns the month of the year for each -of the given times in UTC. Returned values are from 1 to 12, where 1 means -January etc. 
+`month(v=vector(time()) instant-vector)` interpretes float samples in `v` as +timestamps (number of seconds since January 1, 1970 UTC) and returns the month +of the year (in UTC) for each of those timestamps. Returned values are from 1 +to 12, where 1 means January etc. Histogram samples in the input vector are +ignored silently. ## `predict_linear()` `predict_linear(v range-vector, t scalar)` predicts the value of time series `t` seconds from now, based on the range vector `v`, using [simple linear -regression](https://en.wikipedia.org/wiki/Simple_linear_regression). -The range vector must have at least two samples in order to perform the -calculation. When `+Inf` or `-Inf` are found in the range vector, -the slope and offset value calculated will be `NaN`. +regression](https://en.wikipedia.org/wiki/Simple_linear_regression). The range +vector must have at least two float samples in order to perform the +calculation. When `+Inf` or `-Inf` are found in the range vector, the predicted +value will be `NaN`. -`predict_linear` should only be used with gauges. +`predict_linear` should only be used with gauges and only works for float +samples. Elements in the range vector that contain only histogram samples are +ignored entirely. For elements that contain a mix of float and histogram +samples, only the float samples are used as input, which is flagged by an +info-level annotation. ## `rate()` @@ -675,13 +695,13 @@ rate(http_requests_total{job="api-server"}[5m]) `rate` acts on native histograms by calculating a new histogram where each component (sum and count of observations, buckets) is the rate of increase -between the respective component in the first and last native histogram in -`v`. However, each element in `v` that contains a mix of float and native -histogram samples within the range, will be missing from the result vector. +between the respective component in the first and last native histogram in `v`. 
+However, each element in `v` that contains a mix of float and native histogram +samples within the range, will be omitted from the result vector, flagged by a +warn-level annotation. -`rate` should only be used with counters and native histograms where the -components behave like counters. It is best suited for alerting, and for -graphing of slow-moving counters. +`rate` should only be used with counters (for both floats and histograms). It +is best suited for alerting, and for graphing of slow-moving counters. Note that when combining `rate()` with an aggregation operator (e.g. `sum()`) or a function aggregating over time (any function ending in `_over_time`), @@ -696,17 +716,15 @@ decrease in the value between two consecutive float samples is interpreted as a counter reset. A reset in a native histogram is detected in a more complex way: Any decrease in any bucket, including the zero bucket, or in the count of observation constitutes a counter reset, but also the disappearance of any -previously populated bucket, an increase in bucket resolution, or a decrease of -the zero-bucket width. +previously populated bucket, a decrease of the zero-bucket width, or any schema +change that is not a compatible decrease of resolution. -`resets` should only be used with counters and counter-like native -histograms. +`resets` should only be used with counters (for both floats and histograms). -If the range vector contains a mix of float and histogram samples for the same -series, counter resets are detected separately and their numbers added up. The -change from a float to a histogram sample is _not_ considered a counter -reset. Each float sample is compared to the next float sample, and each -histogram is comprared to the next histogram. +A float sample followed by a histogram sample, or vice versa, counts as a +reset. 
A counter histogram sample followed by a gauge histogram sample, or vice +versa, also counts as a reset (but note that `resets` should not be used on +gauges in the first place, see above). ## `round()` @@ -714,53 +732,63 @@ histogram is comprared to the next histogram. elements in `v` to the nearest integer. Ties are resolved by rounding up. The optional `to_nearest` argument allows specifying the nearest multiple to which the sample values should be rounded. This multiple may also be a fraction. +Histogram samples in the input vector are ignored silently. ## `scalar()` -Given a single-element input vector, `scalar(v instant-vector)` returns the -sample value of that single element as a scalar. If the input vector does not -have exactly one element, `scalar` will return `NaN`. +Given an input vector that contains only one element with a float sample, +`scalar(v instant-vector)` returns the sample value of that float sample as a +scalar. If the input vector does not have exactly one element with a float +sample, `scalar` will return `NaN`. Histogram samples in the input vector are +ignored silently. ## `sgn()` -`sgn(v instant-vector)` returns a vector with all sample values converted to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0 if v is equal to zero. +`sgn(v instant-vector)` returns a vector with all float sample values converted +to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0 +if v is equal to zero. Histogram samples in the input vector are ignored silently. ## `sort()` -`sort(v instant-vector)` returns vector elements sorted by their sample values, -in ascending order. Native histograms are sorted by their sum of observations. +`sort(v instant-vector)` returns vector elements sorted by their float sample +values, in ascending order. Histogram samples in the input vector are ignored silently. 
-Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering. +Please note that `sort` only affects the results of instant queries, as range +query results always have a fixed output ordering. ## `sort_desc()` Same as `sort`, but sorts in descending order. -Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering. - ## `sort_by_label()` -**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature +flag](../feature_flags.md#experimental-promql-functions) +`--enable-feature=promql-experimental-functions`.** -`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets. +`sort_by_label(v instant-vector, label string, ...)` returns vector elements +sorted by the values of the given labels in ascending order. In case these +label values are equal, elements are sorted by their full label sets. +`sort_by_label` acts on float and histogram samples in the same way. -Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. +Please note that `sort_by_label` only affects the results of instant queries, as +range query results always have a fixed output ordering. -This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order). +`sort_by_label` uses [natural sort +order](https://en.wikipedia.org/wiki/Natural_sort_order). 
## `sort_by_label_desc()` -**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature +flag](../feature_flags.md#experimental-promql-functions) +`--enable-feature=promql-experimental-functions`.** Same as `sort_by_label`, but sorts in descending order. -Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. - -This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_sort_order). - ## `sqrt()` -`sqrt(v instant-vector)` calculates the square root of all elements in `v`. +`sqrt(v instant-vector)` calculates the square root of all float samples in +`v`. Histogram samples in the input vector are ignored silently. ## `time()` @@ -771,66 +799,80 @@ expression is to be evaluated. ## `timestamp()` `timestamp(v instant-vector)` returns the timestamp of each of the samples of -the given vector as the number of seconds since January 1, 1970 UTC. It also -works with histogram samples. +the given vector as the number of seconds since January 1, 1970 UTC. It acts on +float and histogram samples in the same way. ## `vector()` -`vector(s scalar)` returns the scalar `s` as a vector with no labels. +`vector(s scalar)` converts the scalar `s` to a float sample and returns it as +a single-element instant vector with no labels. ## `year()` -`year(v=vector(time()) instant-vector)` returns the year -for each of the given times in UTC. +`year(v=vector(time()) instant-vector)` returns the year for each of the given +times in UTC. Histogram samples in the input vector are ignored silently. 
## `_over_time()` The following functions allow aggregating each series of a given range vector over time and return an instant vector with per-series aggregation results: -* `avg_over_time(range-vector)`: the average value of all points in the specified interval. -* `min_over_time(range-vector)`: the minimum value of all points in the specified interval. -* `max_over_time(range-vector)`: the maximum value of all points in the specified interval. -* `sum_over_time(range-vector)`: the sum of all values in the specified interval. -* `count_over_time(range-vector)`: the count of all values in the specified interval. -* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. -* `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. -* `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. -* `last_over_time(range-vector)`: the most recent point value in the specified interval. +* `avg_over_time(range-vector)`: the average value of all float or histogram samples in the specified interval (see details below). +* `min_over_time(range-vector)`: the minimum value of all float samples in the specified interval. +* `max_over_time(range-vector)`: the maximum value of all float samples in the specified interval. +* `sum_over_time(range-vector)`: the sum of all float or histogram samples in the specified interval (see details below). +* `count_over_time(range-vector)`: the count of all samples in the specified interval. +* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the specified interval. +* `stddev_over_time(range-vector)`: the population standard deviation of all float samples in the specified interval. +* `stdvar_over_time(range-vector)`: the population standard variance of all float samples in the specified interval. 
+* `last_over_time(range-vector)`: the most recent sample in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. If the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions` is set, the following additional functions are available: -* `mad_over_time(range-vector)`: the median absolute deviation of all points in the specified interval. +* `mad_over_time(range-vector)`: the median absolute deviation of all float + samples in the specified interval. Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. -`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and -`present_over_time` handle native histograms as expected. All other functions -ignore histogram samples. +These functions act on histograms in the following way: + +- `count_over_time`, `last_over_time`, and `present_over_time()` act on float + and histogram samples in the same way. +- `avg_over_time()` and `sum_over_time()` act on histogram samples in a way + that corresponds to the respective aggregation operators. If a series + contains a mix of float samples and histogram samples within the range, the + corresponding result is removed entirely from the output vector. Such a + removal is flagged by a warn-level annotation. +- All other functions ignore histogram samples in the following way: Input + ranges containing only histogram samples are silently removed from the + output. For ranges with a mix of histogram and float samples, only the float + samples are processed and the omission of the histogram samples is flagged by + an info-level annotation. ## Trigonometric Functions -The trigonometric functions work in radians: +The trigonometric functions work in radians. They ignore histogram samples in +the input vector. 
-* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)). -* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). -* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). -* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). -* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). -* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). -* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). -* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). -* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)). -* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)). -* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)). -* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)). +* `acos(v instant-vector)`: calculates the arccosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Acos)). +* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Acosh)). +* `asin(v instant-vector)`: calculates the arcsine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Asin)). 
+* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Asinh)). +* `atan(v instant-vector)`: calculates the arctangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Atan)). +* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Atanh)). +* `cos(v instant-vector)`: calculates the cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Cos)). +* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Cosh)). +* `sin(v instant-vector)`: calculates the sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Sin)). +* `sinh(v instant-vector)`: calculates the hyperbolic sine of all float samples in `v` ([special cases](https://pkg.go.dev/math#Sinh)). +* `tan(v instant-vector)`: calculates the tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Tan)). +* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all float samples in `v` ([special cases](https://pkg.go.dev/math#Tanh)). The following are useful for converting between degrees and radians: -* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`. +* `deg(v instant-vector)`: converts radians to degrees for all float samples in `v`. * `pi()`: returns pi. -* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`. +* `rad(v instant-vector)`: converts degrees to radians for all float samples in `v`. 
diff --git a/docs/querying/operators.md b/docs/querying/operators.md index f5f217ff63..02cea500a1 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -23,22 +23,51 @@ The following binary arithmetic operators exist in Prometheus: * `^` (power/exponentiation) Binary arithmetic operators are defined between scalar/scalar, vector/scalar, -and vector/vector value pairs. +and vector/vector value pairs. They follow the usual [IEEE 754 floating point +arithmetic](https://en.wikipedia.org/wiki/IEEE_754), including the handling of +special values like `NaN`, `+Inf`, and `-Inf`. **Between two scalars**, the behavior is obvious: they evaluate to another scalar that is the result of the operator applied to both scalar operands. **Between an instant vector and a scalar**, the operator is applied to the -value of every data sample in the vector. E.g. if a time series instant vector -is multiplied by 2, the result is another vector in which every sample value of -the original vector is multiplied by 2. The metric name is dropped. +value of every data sample in the vector. If the data sample is a float, the +operation performed on the data sample is again obvious, e.g. if an instant +vector of float samples is multiplied by 2, the result is another vector of +float samples in which every sample value of the original vector is multiplied +by 2. For vector elements that are histogram samples, the behavior is the +following: For `*`, all bucket populations and the count and the sum of +observations are multiplied by the scalar. For `/`, the histogram sample has to +be on the left hand side (LHS), followed by the scalar on the right hand side +(RHS). All bucket populations and the count and the sum of observations are +then divided by the scalar. 
A division by zero results in a histogram with no +regular buckets and the zero bucket population and the count and sum of +observations all set to +Inf, -Inf, or NaN, depending on their values in the +input histogram (positive, negative, or zero/NaN, respectively). For `/` with a +scalar on the LHS and a histogram sample on the RHS, and similarly for all +other arithmetic binary operators in any combination of a scalar and a +histogram sample, there is no result and the corresponding element is removed +from the resulting vector. Such a removal is flagged by an info-level +annotation. **Between two instant vectors**, a binary arithmetic operator is applied to -each entry in the left-hand side vector and its [matching element](#vector-matching) -in the right-hand vector. The result is propagated into the result vector with the -grouping labels becoming the output label set. The metric name is dropped. Entries -for which no matching entry in the right-hand vector can be found are not part of -the result. +each entry in the LHS vector and its [matching element](#vector-matching) in +the RHS vector. The result is propagated into the result vector with the +grouping labels becoming the output label set. Entries for which no matching +entry in the right-hand vector can be found are not part of the result. If two +float samples are matched, the behavior is obvious. If a float sample is +matched with a histogram sample, the behavior follows the same logic as between +a scalar and a histogram sample (see above), i.e. `*` and `/` (the latter with +the histogram sample on the LHS) are valid operations, while all others lead to +the removal of the corresponding element from the resulting vector. If two +histogram samples are matched, only `+` and `-` are valid operations, each +adding or subtracting all matching bucket populations and the count and the +sum of observations. 
All other operations result in the removal of the +corresponding element from the output vector, flagged by an info-level +annotation. + +**In any arithmetic binary operation involving vectors**, the metric name is +dropped. ### Trigonometric binary operators @@ -46,9 +75,12 @@ The following trigonometric binary operators, which work in radians, exist in Pr * `atan2` (based on https://pkg.go.dev/math#Atan2) -Trigonometric operators allow trigonometric functions to be executed on two vectors using -vector matching, which isn't available with normal functions. They act in the same manner -as arithmetic operators. +Trigonometric operators allow trigonometric functions to be executed on two +vectors using vector matching, which isn't available with normal functions. +They act in the same manner as arithmetic operators. They only operate on float +samples. Operations involving histogram samples result in the removal of the +corresponding vector elements from the output vector, flagged by an +info-level annotation. ### Comparison binary operators @@ -72,20 +104,28 @@ operators result in another scalar that is either `0` (`false`) or `1` **Between an instant vector and a scalar**, these operators are applied to the value of every data sample in the vector, and vector elements between which the -comparison result is `false` get dropped from the result vector. If the `bool` -modifier is provided, vector elements that would be dropped instead have the value -`0` and vector elements that would be kept have the value `1`. The metric name -is dropped if the `bool` modifier is provided. +comparison result is `false` get dropped from the result vector. These +operations only work with float samples in the vector. For histogram samples, +the corresponding element is removed from the result vector, flagged by an +info-level annotation. **Between two instant vectors**, these operators behave as a filter by default, applied to matching entries. 
Vector elements for which the expression is not true or which do not find a match on the other side of the expression get dropped from the result, while the others are propagated into a result vector -with the grouping labels becoming the output label set. -If the `bool` modifier is provided, vector elements that would have been -dropped instead have the value `0` and vector elements that would be kept have -the value `1`, with the grouping labels again becoming the output label set. -The metric name is dropped if the `bool` modifier is provided. +with the grouping labels becoming the output label set. Matches between two +float samples work as usual, while matches between a float sample and a +histogram sample are invalid. The corresponding element is removed from the +result vector, flagged by an info-level annotation. Between two histogram +samples, `==` and `!=` work as expected, but all other comparison binary +operations are again invalid. + +**In any comparison binary operation involving vectors**, providing the `bool` +modifier changes the behavior in the following way: Vector elements that would +be dropped instead have the value `0` and vector elements that would be kept +have the value `1`. Additionally, the metric name is dropped. (Note that +invalid operations involving histogram samples still return no result rather +than the value `0`.) ### Logical/set binary operators @@ -108,6 +148,9 @@ which do not have matching label sets in `vector1`. `vector1` for which there are no elements in `vector2` with exactly matching label sets. All matching elements in both vectors are dropped. +As these logical/set binary operators do not interact with the sample values, +they work in the same way for float samples and histogram samples. 
+ ## Vector matching Operations between vectors attempt to find a matching element in the right-hand side @@ -219,19 +262,20 @@ used to aggregate the elements of a single instant vector, resulting in a new vector of fewer elements with aggregated values: * `sum` (calculate sum over dimensions) +* `avg` (calculate the arithmetic average over dimensions) * `min` (select minimum over dimensions) * `max` (select maximum over dimensions) -* `avg` (calculate the average over dimensions) +* `bottomk` (smallest _k_ elements by sample value) +* `topk` (largest _k_ elements by sample value) +* `limitk` (sample _k_ elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`) +* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`) * `group` (all values in the resulting vector are 1) -* `stddev` (calculate population standard deviation over dimensions) -* `stdvar` (calculate population standard variance over dimensions) * `count` (count number of elements in the vector) * `count_values` (count number of elements with the same value) -* `bottomk` (smallest k elements by sample value) -* `topk` (largest k elements by sample value) + +* `stddev` (calculate population standard deviation over dimensions) +* `stdvar` (calculate population standard variance over dimensions) * `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions) -* `limitk` (sample n elements) -* `limit_ratio` (sample elements with approximately 𝑟 ratio if `𝑟 > 0`, and the complement of such samples if `𝑟 = -(1.0 - 𝑟)`) These operators can either be used to aggregate over **all** label dimensions or preserve distinct dimensions by including a `without` or `by` clause. These @@ -251,29 +295,67 @@ all other labels are preserved in the output. 
`by` does the opposite and drops labels that are not listed in the `by` clause, even if their label values are identical between all elements of the vector. -`parameter` is only required for `count_values`, `quantile`, `topk`, -`bottomk`, `limitk` and `limit_ratio`. +`parameter` is only required for `topk`, `bottomk`, `limitk`, `limit_ratio`, +`quantile`, and `count_values`. It is used as the value for _k_, _r_, φ, or the +name of the additional label, respectively. + +### Detailed explanations + +`sum` sums up sample values in the same way as the `+` binary operator does +between two values. Similarly, `avg` divides the sum by the number of +aggregated samples in the same way as the `/` binary operator. Therefore, all +sample values aggregated into a single resulting vector element must either be +float samples or histogram samples. An aggregation of a mix of both is invalid, +resulting in the removal of the corresponding vector element from the output +vector, flagged by a warn-level annotation. + +`min` and `max` only operate on float samples, following IEEE 754 floating +point arithmetic, which in particular implies that `NaN` is only ever +considered a minimum or maximum if all aggregated values are `NaN`. Histogram +samples in the input vector are ignored, flagged by an info-level annotation. + +`topk` and `bottomk` are different from other aggregators in that a subset of +the input samples, including the original labels, are returned in the result +vector. `by` and `without` are only used to bucket the input vector. Similar to +`min` and `max`, they only operate on float samples, considering `NaN` values +to be farthest from the top or bottom, respectively. Histogram samples in the +input vector are ignored, flagged by an info-level annotation. + +`limitk` and `limit_ratio` also return a subset of the input samples, including +the original labels in the result vector. The subset is selected in a +deterministic pseudo-random way. 
`limitk` picks _k_ samples, while +`limit_ratio` picks a ratio _r_ of samples (each determined by `parameter`). +This happens independent of the sample type. Therefore, it works for both float +samples and histogram samples. _r_ can be between +1 and -1. The absolute value +of _r_ is used as the selection ratio, but the selection order is inverted for +a negative _r_, which can be used to select complements. For example, +`limit_ratio(0.1, ...)` returns a deterministic set of approximately 10% of +the input samples, while `limit_ratio(-0.9, ...)` returns precisely the +remaining approximately 90% of the input samples not returned by +`limit_ratio(0.1, ...)`. + +`group` and `count` do not interact with the sample values, +they work in the same way for float samples and histogram samples. `count_values` outputs one time series per unique sample value. Each series has an additional label. The name of that label is given by the aggregation parameter, and the label value is the unique sample value. The value of each time series is the number of times that sample value was present. +`count_values` works with both float samples and histogram samples. For the +latter, a compact string representation of the histogram sample value is used +as the label value. -`topk` and `bottomk` are different from other aggregators in that a subset of -the input samples, including the original labels, are returned in the result -vector. `by` and `without` are only used to bucket the input vector. - -`limitk` and `limit_ratio` also return a subset of the input samples, -including the original labels in the result vector, these are experimental -operators that must be enabled with `--enable-feature=promql-experimental-functions`. +`stddev` and `stdvar` only work with float samples, following IEEE 754 floating +point arithmetic. Histogram samples in the input vector are ignored, flagged by +an info-level annotation. 
`quantile` calculates the φ-quantile, the value that ranks at number φ*N among the N metric values of the dimensions aggregated over. φ is provided as the aggregation parameter. For example, `quantile(0.5, ...)` calculates the median, -`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. +`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. +For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. - -Example: +### Examples If the metric `http_requests_total` had time series that fan out by `application`, `instance`, and `group` labels, we could calculate the total @@ -303,28 +385,6 @@ could write: limitk(10, http_requests_total) -To deterministically sample approximately 10% of timeseries we could write: - - limit_ratio(0.1, http_requests_total) - -Given that `limit_ratio()` implements a deterministic sampling algorithm (based -on labels' hash), you can get the _complement_ of the above samples, i.e. -approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)` -with: - - limit_ratio(-0.9, http_requests_total) - -You can also use this feature to e.g. verify that `avg()` is a representative -aggregation for your samples' values, by checking that the difference between -averaging two samples' subsets is "small" when compared to the standard -deviation. - - abs( - avg(limit_ratio(0.5, http_requests_total)) - - - avg(limit_ratio(-0.5, http_requests_total)) - ) <= bool stddev(http_requests_total) - ## Binary operator precedence The following list shows the precedence of binary operators in Prometheus, from @@ -340,35 +400,3 @@ highest to lowest. Operators on the same precedence level are left-associative. For example, `2 * 3 % 2` is equivalent to `(2 * 3) % 2`. However `^` is right associative, so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. - -## Operators for native histograms - -Native histograms are an experimental feature. 
Ingesting native histograms has -to be enabled via a [feature flag](../../feature_flags.md#native-histograms). Once -native histograms have been ingested, they can be queried (even after the -feature flag has been disabled again). However, the operator support for native -histograms is still very limited. - -Logical/set binary operators work as expected even if histogram samples are -involved. They only check for the existence of a vector element and don't -change their behavior depending on the sample type of an element (float or -histogram). The `count` aggregation operator works similarly. - -The binary `+` and `-` operators between two native histograms and the `sum` -and `avg` aggregation operators to aggregate native histograms are fully -supported. Even if the histograms involved have different bucket layouts, the -buckets are automatically converted appropriately so that the operation can be -performed. (With the currently supported bucket schemas, that's always -possible.) If either operator has to aggregate a mix of histogram samples and -float samples, the corresponding vector element is removed from the output -vector entirely. - -The binary `*` operator works between a native histogram and a float in any -order, while the binary `/` operator can be used between a native histogram -and a float in that exact order. - -All other operators (and unmentioned cases for the above operators) do not -behave in a meaningful way. They either treat the histogram sample as if it -were a float sample of value 0, or (in case of arithmetic operations between a -scalar and a vector) they leave the histogram sample unchanged. This behavior -will change to a meaningful one before native histograms are a stable feature. From 402fa38e84f742607de410e7e205575ee0ae5688 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 10 Feb 2025 13:19:56 +0100 Subject: [PATCH 1355/1397] Improve tick rendering (#15999) Move to 24h-based time formatting and unambiguous date formats. 
Also add more details to the default formatting of each tick instead of only showing e.g. minutes/seconds at rollover ticks for the shorter breakpoints. Fixes https://github.com/prometheus/prometheus/issues/15913 Signed-off-by: Julius Volz --- .../src/pages/query/uPlotChartHelpers.ts | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts b/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts index e9dad3665f..ac77fee596 100644 --- a/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts +++ b/web/ui/mantine-ui/src/pages/query/uPlotChartHelpers.ts @@ -332,6 +332,7 @@ export const getUPlotOptions = ( axes: [ // X axis (time). { + space: 80, labelSize: 20, stroke: light ? "#333" : "#eee", ticks: { @@ -343,6 +344,33 @@ export const getUPlotOptions = ( width: 2, dash: [], }, + values: [ + // See https://github.com/leeoniya/uPlot/tree/master/docs#axis--grid-opts and https://github.com/leeoniya/uPlot/issues/83. + // + // We want to achieve 24h-based time formatting instead of the default AM/PM-based time formatting. + // We also want to render dates in an unambiguous format that uses the abbreviated month name instead of a US-centric DD/MM/YYYY format. + // + // The "tick incr" column defines the breakpoint in seconds at which the format changes. + // The "default" column defines the default format for a tick at this breakpoint. + // The "year"/"month"/"day"/"hour"/"min"/"sec" columns define additional values to display for year/month/day/... rollovers occurring around a tick. + // The "mode" column value "1" means that rollover values will be concatenated with the default format (instead of replacing it). 
+ // + // tick incr default year month day hour min sec mode + // prettier-ignore + [3600 * 24 * 365, "{YYYY}", null, null, null, null, null, null, 1], + // prettier-ignore + [3600 * 24 * 28, "{MMM}", "\n{YYYY}", null, null, null, null, null, 1], + // prettier-ignore + [3600 * 24, "{MMM} {D}", "\n{YYYY}", null, null, null, null, null, 1], + // prettier-ignore + [3600, "{HH}:{mm}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1], + // prettier-ignore + [60, "{HH}:{mm}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1], + // prettier-ignore + [1, "{HH}:{mm}:{ss}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1], + // prettier-ignore + [0.001, "{HH}:{mm}:{ss}.{fff}", "\n{MMM} {D} '{YY}", null, "\n{MMM} {D}", null, null, null, 1], + ], }, // Y axis (sample value). { @@ -382,7 +410,10 @@ export const getUPlotOptions = ( (self: uPlot) => { // Disallow sub-second zoom as this cause inconsistenices in the X axis in uPlot. const leftVal = self.posToVal(self.select.left, "x"); - const rightVal = Math.max(self.posToVal(self.select.left + self.select.width, "x"), leftVal + 1); + const rightVal = Math.max( + self.posToVal(self.select.left + self.select.width, "x"), + leftVal + 1 + ); onSelectRange(leftVal, rightVal); }, From 8cd9069cf192c88bfd8890280c78a6d583bcc694 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Mon, 10 Feb 2025 13:26:18 +0100 Subject: [PATCH 1356/1397] textparse: Refactor benchmark testdata for all types. (#15998) Also: * split benchmark functions to make sure no one compares across parsers. 
* testdata file have meaningful names reflecting the type representation * promtestdata.txt now has all types, taken directly from long running Prometheus (https://demo.do.prometheus.io/) Needed for https://github.com/prometheus/prometheus/pull/15731 Signed-off-by: bwplotka --- model/textparse/benchmark_test.go | 358 ++- ...{omhistogramdata.txt => 1histogram.om.txt} | 0 .../testdata/alltypes.237mfs.nometa.prom.txt | 1858 +++++++++++++ .../testdata/alltypes.237mfs.prom.txt | 2332 +++++++++++++++++ .../{omtestdata.txt => alltypes.5mfs.om.txt} | 0 .../testdata/promtestdata.nometa.txt | 411 --- model/textparse/testdata/promtestdata.txt | 529 ---- scrape/scrape_test.go | 25 +- 8 files changed, 4425 insertions(+), 1088 deletions(-) rename model/textparse/testdata/{omhistogramdata.txt => 1histogram.om.txt} (100%) create mode 100644 model/textparse/testdata/alltypes.237mfs.nometa.prom.txt create mode 100644 model/textparse/testdata/alltypes.237mfs.prom.txt rename model/textparse/testdata/{omtestdata.txt => alltypes.5mfs.om.txt} (100%) delete mode 100644 model/textparse/testdata/promtestdata.nometa.txt delete mode 100644 model/textparse/testdata/promtestdata.txt diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index c0492e3c99..4c5d850930 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -20,6 +20,7 @@ import ( "io" "os" "path/filepath" + "strings" "testing" "github.com/prometheus/prometheus/model/exemplar" @@ -30,32 +31,9 @@ import ( "github.com/stretchr/testify/require" ) -type newParser func([]byte, *labels.SymbolTable) Parser - -var newTestParserFns = map[string]newParser{ - "promtext": NewPromParser, - "promproto": func(b []byte, st *labels.SymbolTable) Parser { - return NewProtobufParser(b, true, st) - }, - "omtext": func(b []byte, st *labels.SymbolTable) Parser { - return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) - }, - "omtext_with_nhcb": func(b []byte, st *labels.SymbolTable) 
Parser { - p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) - return NewNHCBParser(p, st, false) - }, -} - -// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append use it. -// Typically used as follows: -/* - export bench=v1 && go test ./model/textparse/... \ - -run '^$' -bench '^BenchmarkParse' \ - -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ - | tee ${bench}.txt -*/ -// For profiles, add -memprofile=${bench}.mem.pprof -cpuprofile=${bench}.cpu.pprof -// options. +// BenchmarkParse... set of benchmarks analyze efficiency of parsing various +// datasets with different parsers. It mimics how scrape/scrape.go#append use parsers +// and allows comparison with expfmt decoders if applicable. // // NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated // Series, Series+Metrics with and without reuse, Series+CT. Those cases are sometimes @@ -63,124 +41,224 @@ var newTestParserFns = map[string]newParser{ // make sense to persist such cases for everybody (e.g. for CI one day). // For local iteration, feel free to adjust cases/comment out code etc. // -// NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest" -// as the testdata has different amount and type of metrics and features (e.g. exemplars). -// Use scrape.BenchmarkScrapeLoopAppend for this purpose. -func BenchmarkParse(b *testing.B) { - for _, bcase := range []struct { - dataFile string // Localized to "./testdata". - dataProto []byte - parser string +// NOTE(bwplotka): Those benchmarks are purposefully categorized per data-sets, +// to avoid temptation to assess "what parser (OM, proto, prom) is the fastest, +// in general" here due to not every parser supporting every data set type. +// Use scrape.BenchmarkScrapeLoopAppend if you want one benchmark comparing parsers fairly. 
- compareToExpfmtFormat expfmt.FormatType - }{ - {dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, - {dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain}, +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParsePromText' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParsePromText(b *testing.B) { + data := readTestdataFile(b, "alltypes.237mfs.prom.txt") - // We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. - {dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"}, - - // We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703. - {dataFile: "omtestdata.txt", parser: "omtext"}, - {dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext. - - // NHCB. - {dataFile: "omhistogramdata.txt", parser: "omtext"}, // Measure OM parser baseline for histograms. - {dataFile: "omhistogramdata.txt", parser: "omtext_with_nhcb"}, // Measure NHCB over OM parser. + for _, parser := range []string{ + "promtext", + "omtext", // Compare how omtext parser deals with Prometheus text format. 
+ "expfmt-promtext", } { - var buf []byte - dataCase := bcase.dataFile - if len(bcase.dataProto) > 0 { - dataCase = "createTestProtoBuf()" - buf = bcase.dataProto - } else { - f, err := os.Open(filepath.Join("testdata", bcase.dataFile)) - require.NoError(b, err) - b.Cleanup(func() { - _ = f.Close() - }) - buf, err = io.ReadAll(f) - require.NoError(b, err) - } - b.Run(fmt.Sprintf("data=%v/parser=%v", dataCase, bcase.parser), func(b *testing.B) { - newParserFn := newTestParserFns[bcase.parser] - var ( - res labels.Labels - e exemplar.Exemplar - ) - - b.SetBytes(int64(len(buf))) - b.ReportAllocs() - b.ResetTimer() - - st := labels.NewSymbolTable() - for i := 0; i < b.N; i++ { - p := newParserFn(buf, st) - - Inner: - for { - t, err := p.Next() - switch t { - case EntryInvalid: - if errors.Is(err, io.EOF) { - break Inner - } - b.Fatal(err) - case EntryType: - _, _ = p.Type() - continue - case EntryHelp: - _, _ = p.Help() - continue - case EntryUnit: - _, _ = p.Unit() - continue - case EntryComment: - continue - case EntryHistogram: - _, _, _, _ = p.Histogram() - case EntrySeries: - _, _, _ = p.Series() - default: - b.Fatal("not implemented entry", t) - } - - _ = p.Metric(&res) - _ = p.CreatedTimestamp() - for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { - } - } - } - }) - - b.Run(fmt.Sprintf("data=%v/parser=xpfmt", dataCase), func(b *testing.B) { - if bcase.compareToExpfmtFormat == expfmt.TypeUnknown { - b.Skip("compareToExpfmtFormat not set") - } - - b.SetBytes(int64(len(buf))) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - decSamples := make(model.Vector, 0, 50) - sdec := expfmt.SampleDecoder{ - Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(bcase.compareToExpfmtFormat)), - Opts: &expfmt.DecodeOptions{ - Timestamp: model.TimeFromUnixNano(0), - }, - } - - for { - if err := sdec.Decode(&decSamples); err != nil { - if errors.Is(err, io.EOF) { - break - } - b.Fatal(err) - } - decSamples = decSamples[:0] - 
} + b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) { + if strings.HasPrefix(parser, "expfmt-") { + benchExpFmt(b, data, parser) + } else { + benchParse(b, data, parser) } }) } } + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParsePromText_NoMeta' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParsePromText_NoMeta(b *testing.B) { + data := readTestdataFile(b, "alltypes.237mfs.nometa.prom.txt") + + for _, parser := range []string{ + "promtext", + "expfmt-promtext", + } { + b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) { + if strings.HasPrefix(parser, "expfmt-") { + benchExpFmt(b, data, parser) + } else { + benchParse(b, data, parser) + } + }) + } +} + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParseOMText' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParseOMText(b *testing.B) { + data := readTestdataFile(b, "alltypes.5mfs.om.txt") + // TODO(bwplotka): Add comparison with expfmt.TypeOpenMetrics once expfmt + // support OM exemplars, see https://github.com/prometheus/common/issues/703. + benchParse(b, data, "omtext") +} + +/* + export bench=v1 && go test ./model/textparse/... \ + -run '^$' -bench '^BenchmarkParsePromProto' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParsePromProto(b *testing.B) { + data := createTestProtoBuf(b).Bytes() + // TODO(bwplotka): Add comparison with expfmt.TypeProtoDelim once expfmt + // support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430. + benchParse(b, data, "promproto") +} + +/* + export bench=v1 && go test ./model/textparse/... 
\ + -run '^$' -bench '^BenchmarkParseOpenMetricsNHCB' \ + -benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \ + | tee ${bench}.txt +*/ +func BenchmarkParseOpenMetricsNHCB(b *testing.B) { + data := readTestdataFile(b, "1histogram.om.txt") + + for _, parser := range []string{ + "omtext", // Measure OM parser baseline for histograms. + "omtext_with_nhcb", // Measure NHCB over OM parser. + } { + b.Run(fmt.Sprintf("parser=%v", parser), func(b *testing.B) { + benchParse(b, data, parser) + }) + } +} + +func benchParse(b *testing.B, data []byte, parser string) { + type newParser func([]byte, *labels.SymbolTable) Parser + + var newParserFn newParser + switch parser { + case "promtext": + newParserFn = NewPromParser + case "promproto": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + return NewProtobufParser(b, true, st) + } + case "omtext": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + } + case "omtext_with_nhcb": + newParserFn = func(b []byte, st *labels.SymbolTable) Parser { + p := NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped()) + return NewNHCBParser(p, st, false) + } + default: + b.Fatal("unknown parser", parser) + } + + var ( + res labels.Labels + e exemplar.Exemplar + ) + + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + + st := labels.NewSymbolTable() + for i := 0; i < b.N; i++ { + p := newParserFn(data, st) + + Inner: + for { + t, err := p.Next() + switch t { + case EntryInvalid: + if errors.Is(err, io.EOF) { + break Inner + } + b.Fatal(err) + case EntryType: + _, _ = p.Type() + continue + case EntryHelp: + _, _ = p.Help() + continue + case EntryUnit: + _, _ = p.Unit() + continue + case EntryComment: + continue + case EntryHistogram: + _, _, _, _ = p.Histogram() + case EntrySeries: + _, _, _ = p.Series() + default: + b.Fatal("not implemented entry", t) + } + + _ = p.Metric(&res) + _ = p.CreatedTimestamp() + for hasExemplar := 
p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { + } + } + } +} + +func benchExpFmt(b *testing.B, data []byte, expFormatTypeStr string) { + expfmtFormatType := expfmt.TypeUnknown + switch expFormatTypeStr { + case "expfmt-promtext": + expfmtFormatType = expfmt.TypeProtoText + case "expfmt-promproto": + expfmtFormatType = expfmt.TypeProtoDelim + case "expfmt-omtext": + expfmtFormatType = expfmt.TypeOpenMetrics + default: + b.Fatal("unknown expfmt format type", expFormatTypeStr) + } + + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + decSamples := make(model.Vector, 0, 50) + sdec := expfmt.SampleDecoder{ + Dec: expfmt.NewDecoder(bytes.NewReader(data), expfmt.NewFormat(expfmtFormatType)), + Opts: &expfmt.DecodeOptions{ + Timestamp: model.TimeFromUnixNano(0), + }, + } + + for { + if err := sdec.Decode(&decSamples); err != nil { + if errors.Is(err, io.EOF) { + break + } + b.Fatal(err) + } + decSamples = decSamples[:0] + } + } +} + +func readTestdataFile(tb testing.TB, file string) []byte { + tb.Helper() + + f, err := os.Open(filepath.Join("testdata", file)) + require.NoError(tb, err) + + tb.Cleanup(func() { + _ = f.Close() + }) + buf, err := io.ReadAll(f) + require.NoError(tb, err) + return buf +} diff --git a/model/textparse/testdata/omhistogramdata.txt b/model/textparse/testdata/1histogram.om.txt similarity index 100% rename from model/textparse/testdata/omhistogramdata.txt rename to model/textparse/testdata/1histogram.om.txt diff --git a/model/textparse/testdata/alltypes.237mfs.nometa.prom.txt b/model/textparse/testdata/alltypes.237mfs.nometa.prom.txt new file mode 100644 index 0000000000..8724a7b837 --- /dev/null +++ b/model/textparse/testdata/alltypes.237mfs.nometa.prom.txt @@ -0,0 +1,1858 @@ +go_gc_cycles_automatic_gc_cycles_total 190932 +go_gc_cycles_forced_gc_cycles_total 0 +go_gc_cycles_total_gc_cycles_total 190932 +go_gc_duration_seconds{quantile="0"} 7.6238e-05 
+go_gc_duration_seconds{quantile="0.25"} 0.00010188 +go_gc_duration_seconds{quantile="0.5"} 0.000135819 +go_gc_duration_seconds{quantile="0.75"} 0.000156061 +go_gc_duration_seconds{quantile="1"} 0.002389378 +go_gc_duration_seconds_sum 44.985611511 +go_gc_duration_seconds_count 190932 +go_gc_gogc_percent 75 +go_gc_gomemlimit_bytes 9.03676723e+08 +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 2.279966416e+09 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 1.9429106442e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 3.2158220609e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 4.2309744198e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 4.3418919481e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 4.374631622e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 4.3917578245e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 4.396605609e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 4.4007501305e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 4.4020325917e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 4.4036187548e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 4.4046288803e+10 +go_gc_heap_allocs_by_size_bytes_sum 5.937322571024e+12 +go_gc_heap_allocs_by_size_bytes_count 4.4046288803e+10 +go_gc_heap_allocs_bytes_total 5.937322571024e+12 +go_gc_heap_allocs_objects_total 4.4046288803e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 2.279951045e+09 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 1.9428965693e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 3.2157849717e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 4.2309204178e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 4.3418348856e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 
4.3745739652e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 4.3916999773e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 4.3965477112e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 4.4006921621e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 4.4019746017e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 4.4035607466e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 4.4045708586e+10 +go_gc_heap_frees_by_size_bytes_sum 5.937239047736e+12 +go_gc_heap_frees_by_size_bytes_count 4.4045708586e+10 +go_gc_heap_frees_bytes_total 5.937239047736e+12 +go_gc_heap_frees_objects_total 4.4045708586e+10 +go_gc_heap_goal_bytes 1.0365948e+08 +go_gc_heap_live_bytes 5.8791024e+07 +go_gc_heap_objects_objects 580217 +go_gc_heap_tiny_allocs_objects_total 8.306064403e+09 +go_gc_limiter_last_enabled_gc_cycle 160896 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 376121 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 381798 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 381863 +go_gc_pauses_seconds_bucket{le="+Inf"} 381864 +go_gc_pauses_seconds_sum 20.343611904000003 +go_gc_pauses_seconds_count 381864 +go_gc_scan_globals_bytes 555824 +go_gc_scan_heap_bytes 4.0986192e+07 +go_gc_scan_stack_bytes 477760 +go_gc_scan_total_bytes 4.2019776e+07 +go_gc_stack_starting_size_bytes 4096 +go_goroutines 151 +go_info{version="go1.23.4"} 1 +go_memstats_alloc_bytes 8.3523288e+07 +go_memstats_alloc_bytes_total 5.937322571024e+12 +go_memstats_buck_hash_sys_bytes 9.35076e+06 +go_memstats_frees_total 5.2351772989e+10 +go_memstats_gc_sys_bytes 5.450544e+06 +go_memstats_heap_alloc_bytes 8.3523288e+07 +go_memstats_heap_idle_bytes 1.23691008e+08 
+go_memstats_heap_inuse_bytes 9.56416e+07 +go_memstats_heap_objects 580217 +go_memstats_heap_released_bytes 1.05897984e+08 +go_memstats_heap_sys_bytes 2.19332608e+08 +go_memstats_last_gc_time_seconds 1.738949372347471e+09 +go_memstats_mallocs_total 5.2352353206e+10 +go_memstats_mcache_inuse_bytes 1200 +go_memstats_mcache_sys_bytes 15600 +go_memstats_mspan_inuse_bytes 1.21232e+06 +go_memstats_mspan_sys_bytes 2.13792e+06 +go_memstats_next_gc_bytes 1.0365948e+08 +go_memstats_other_sys_bytes 803696 +go_memstats_stack_inuse_bytes 2.818048e+06 +go_memstats_stack_sys_bytes 2.818048e+06 +go_memstats_sys_bytes 2.39909176e+08 +go_sched_gomaxprocs_threads 1 +go_sched_goroutines_goroutines 151 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 5.0101035e+07 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 7.4291762e+07 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 8.1386236e+07 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 8.2116161e+07 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 8.2802009e+07 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 8.6274195e+07 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 8.6724074e+07 +go_sched_latencies_seconds_bucket{le="+Inf"} 8.6726596e+07 +go_sched_latencies_seconds_sum 8266.758178496 +go_sched_latencies_seconds_count 8.6726596e+07 +go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-07"} 236272 +go_sched_pauses_stopping_gc_seconds_bucket{le="7.167999999999999e-06"} 380799 +go_sched_pauses_stopping_gc_seconds_bucket{le="8.191999999999999e-05"} 381741 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.0009175039999999999"} 381807 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.010485759999999998"} 381862 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.11744051199999998"} 381864 +go_sched_pauses_stopping_gc_seconds_bucket{le="+Inf"} 381864 
+go_sched_pauses_stopping_gc_seconds_sum 0.191211904 +go_sched_pauses_stopping_gc_seconds_count 381864 +go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="7.167999999999999e-06"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="8.191999999999999e-05"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.0009175039999999999"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.010485759999999998"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.11744051199999998"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="+Inf"} 0 +go_sched_pauses_stopping_other_seconds_sum 0 +go_sched_pauses_stopping_other_seconds_count 0 +go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_total_gc_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_sched_pauses_total_gc_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_sched_pauses_total_gc_seconds_bucket{le="0.0009175039999999999"} 376121 +go_sched_pauses_total_gc_seconds_bucket{le="0.010485759999999998"} 381798 +go_sched_pauses_total_gc_seconds_bucket{le="0.11744051199999998"} 381863 +go_sched_pauses_total_gc_seconds_bucket{le="+Inf"} 381864 +go_sched_pauses_total_gc_seconds_sum 20.343611904000003 +go_sched_pauses_total_gc_seconds_count 381864 +go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_total_other_seconds_bucket{le="7.167999999999999e-06"} 0 +go_sched_pauses_total_other_seconds_bucket{le="8.191999999999999e-05"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.0009175039999999999"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.010485759999999998"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.11744051199999998"} 0 
+go_sched_pauses_total_other_seconds_bucket{le="+Inf"} 0 +go_sched_pauses_total_other_seconds_sum 0 +go_sched_pauses_total_other_seconds_count 0 +go_sync_mutex_wait_total_seconds_total 628.29966272 +go_threads 10 +net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 2 +net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="caddy"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="cadvisor"} 2 +net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_attempted_total{dialer_name="grafana"} 7 +net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 5 +net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="random"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="blackbox"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="caddy"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="cadvisor"} 1 +net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="grafana"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="node"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="random"} 0 +net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 2 +net_conntrack_dialer_conn_established_total{dialer_name="blackbox"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="caddy"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="cadvisor"} 2 +net_conntrack_dialer_conn_established_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_established_total{dialer_name="grafana"} 5 +net_conntrack_dialer_conn_established_total{dialer_name="node"} 5 +net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 1 
+net_conntrack_dialer_conn_established_total{dialer_name="random"} 4 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="timeout"} 0 
+net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="unknown"} 2 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="unknown"} 0 +net_conntrack_listener_conn_accepted_total{listener_name="http"} 561771 +net_conntrack_listener_conn_closed_total{listener_name="http"} 561723 +process_cpu_seconds_total 96501.42 +process_max_fds 65000 +process_network_receive_bytes_total 9.89686846606e+11 +process_network_transmit_bytes_total 3.743928187168e+12 +process_open_fds 124 +process_resident_memory_bytes 1.72822528e+08 +process_start_time_seconds 1.73806369023e+09 +process_virtual_memory_bytes 4.608667648e+09 +process_virtual_memory_max_bytes 1.8446744073709552e+19 +prometheus_api_notification_active_subscribers 2 +prometheus_api_notification_updates_dropped_total 0 +prometheus_api_notification_updates_sent_total 5 +prometheus_api_remote_read_queries 0 +prometheus_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.23.4",revision="7086161a93b262aa0949dbf2aba15a5a7b13e0a3",tags="netgo,builtinassets,stringlabels",version="3.1.0"} 1 
+prometheus_config_last_reload_success_timestamp_seconds 1.7380636976181264e+09 +prometheus_config_last_reload_successful 1 +prometheus_engine_queries 0 +prometheus_engine_queries_concurrent_max 20 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 8.075e-05 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.000917449 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.009315769 +prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 12506.67007997419 +prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 8.714071e+06 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 2.228e-05 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 6.2819e-05 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.000399637 +prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 490.326247638047 +prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 8.714071e+06 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 4.628e-06 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 1.6082e-05 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 4.1174e-05 +prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 8720.662071224393 +prometheus_engine_query_duration_seconds_count{slice="queue_time"} 1.7428526e+07 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 7.83e-07 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.994e-06 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 1.458e-05 +prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 2.337879348999977 +prometheus_engine_query_duration_seconds_count{slice="result_sort"} 1.208154e+06 +prometheus_engine_query_log_enabled 0 
+prometheus_engine_query_log_failures_total 0 +prometheus_engine_query_samples_total 9.1183747239e+10 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 688 +prometheus_http_request_duration_seconds_sum{handler="/"} 0.026826932000000022 +prometheus_http_request_duration_seconds_count{handler="/"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.7400570460000002 +prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 
29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 0.005040123000000002 +prometheus_http_request_duration_seconds_count{handler="/-/ready"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.1"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.2"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.4"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="1"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="3"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="8"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="20"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="60"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="120"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_request_duration_seconds_sum{handler="/alerts"} 0.011801602999999999 +prometheus_http_request_duration_seconds_count{handler="/alerts"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.1"} 
27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.2"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.4"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="1"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="3"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="8"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="20"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="60"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="120"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/*path"} 0.001724389 +prometheus_http_request_duration_seconds_count{handler="/api/v1/*path"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.1"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.2"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.4"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="1"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="3"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="8"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="20"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="60"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="120"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/alertmanagers"} 0.042492975999999995 
+prometheus_http_request_duration_seconds_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.1"} 14630 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.2"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.4"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="1"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="3"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="8"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="20"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="60"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="120"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/alerts"} 19.028669391999912 +prometheus_http_request_duration_seconds_count{handler="/api/v1/alerts"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="120"} 4 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/format_query"} 0.023786675 +prometheus_http_request_duration_seconds_count{handler="/api/v1/format_query"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 17773 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 17860 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 17939 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 17970 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 99.95326355699925 +prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.1"} 4905 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.2"} 4912 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.4"} 4916 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="1"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="3"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="8"} 4921 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="20"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="60"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="120"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/labels"} 12.963155722000025 +prometheus_http_request_duration_seconds_count{handler="/api/v1/labels"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.1"} 2073 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.2"} 2091 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.4"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="1"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="3"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="8"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="20"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="60"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="120"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/metadata"} 16.90794258600001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/metadata"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.2"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.4"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="1"} 12 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="3"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="8"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="20"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="60"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="120"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications"} 0.029363403 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.1"} 3 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.4"} 11 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="1"} 29 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="3"} 79 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="8"} 116 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="20"} 187 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="60"} 224 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="120"} 234 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="+Inf"} 263 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications/live"} 85150.78366682903 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications/live"} 263 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.2"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.4"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="3"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="8"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="20"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="60"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="120"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/parse_query"} 0.7265021480000003 +prometheus_http_request_duration_seconds_count{handler="/api/v1/parse_query"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 3051 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 3082 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 3173 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 3213 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 3216 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 3217 
+prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 70.23571750499987 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.1"} 831 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.2"} 839 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.4"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="1"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="3"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="8"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="20"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="60"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="120"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_exemplars"} 3.6569132899999968 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 116338 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 116784 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 117924 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 118147 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 118155 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 118157 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 118157 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 832.1240439999854 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.2"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.4"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="3"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="8"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="20"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="60"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="120"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/read"} 0.07308876400000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/read"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.1"} 88415 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.2"} 88627 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.4"} 88659 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="1"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="3"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="8"} 
88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="20"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="60"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="120"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/rules"} 629.5237672670012 +prometheus_http_request_duration_seconds_count{handler="/api/v1/rules"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.2"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.4"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="3"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="8"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="20"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="60"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="120"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/scrape_pools"} 0.17501777799999996 +prometheus_http_request_duration_seconds_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.1"} 1217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.2"} 1236 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.4"} 1244 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="1"} 1245 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="3"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="8"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="20"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="60"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="120"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/series"} 19.03228310300001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/series"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.1"} 4412 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.2"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.4"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="1"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="3"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="8"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="20"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="60"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="120"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/buildinfo"} 5.222974727000008 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.1"} 84669 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.2"} 84716 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.4"} 84721 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="1"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="3"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="8"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="20"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="60"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="120"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/config"} 228.93575751199964 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/config"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.2"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.4"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="3"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="8"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="20"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="60"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="120"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 
+prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/flags"} 0.093302815 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/flags"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.1"} 862 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.2"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.4"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="1"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="3"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="8"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="20"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="60"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="120"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/runtimeinfo"} 8.009312817000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.2"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.4"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="3"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="8"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="20"} 94 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="60"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="120"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/tsdb"} 1.3378732679999994 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/walreplay"} 0.034643267 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.2"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.4"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="1"} 191 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="3"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="8"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="20"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="60"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="120"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets"} 0.5432347889999999 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.2"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.4"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="3"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="8"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="20"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="60"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="120"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets/metadata"} 0.043789973 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.1"} 385 
+prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.2"} 481 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.4"} 528 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="1"} 612 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="3"} 634 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="8"} 640 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="20"} 641 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="60"} 642 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="120"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="+Inf"} 643 +prometheus_http_request_duration_seconds_sum{handler="/assets/*filepath"} 280.0660388799997 +prometheus_http_request_duration_seconds_count{handler="/assets/*filepath"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.2"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.4"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="3"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="8"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="20"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="60"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="120"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 
+prometheus_http_request_duration_seconds_sum{handler="/classic/static/*filepath"} 0.006113046000000001 +prometheus_http_request_duration_seconds_count{handler="/classic/static/*filepath"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.2"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.4"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="3"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="8"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="20"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="60"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="120"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_request_duration_seconds_sum{handler="/config"} 0.0024490289999999997 +prometheus_http_request_duration_seconds_count{handler="/config"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.1"} 33 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.2"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.4"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="1"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="3"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="8"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="20"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="60"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="120"} 34 
+prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_request_duration_seconds_sum{handler="/consoles/*filepath"} 0.5689515199999999 +prometheus_http_request_duration_seconds_count{handler="/consoles/*filepath"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/debug/*subpath"} 0.086499352 +prometheus_http_request_duration_seconds_count{handler="/debug/*subpath"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.2"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.4"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="3"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="8"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="20"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="60"} 177 
+prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="120"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_request_duration_seconds_sum{handler="/favicon.ico"} 0.05591882500000002 +prometheus_http_request_duration_seconds_count{handler="/favicon.ico"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.2"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.4"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="3"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="8"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="20"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="60"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="120"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_request_duration_seconds_sum{handler="/favicon.svg"} 0.3058455699999999 +prometheus_http_request_duration_seconds_count{handler="/favicon.svg"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="60"} 4 
+prometheus_http_request_duration_seconds_bucket{handler="/federate",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/federate"} 0.05996980699999999 +prometheus_http_request_duration_seconds_count{handler="/federate"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.2"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.4"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="3"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="8"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="20"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="60"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="120"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_request_duration_seconds_sum{handler="/flags"} 0.001586781 +prometheus_http_request_duration_seconds_count{handler="/flags"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 751 
+prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.032583203000000005 +prometheus_http_request_duration_seconds_count{handler="/graph"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 3.988654e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 4.052461e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 4.057771e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 4.058809e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 4.059071e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_request_duration_seconds_sum{handler="/metrics"} 107584.12561354278 +prometheus_http_request_duration_seconds_count{handler="/metrics"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.2"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.4"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="3"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="8"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="20"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="60"} 1774 
+prometheus_http_request_duration_seconds_bucket{handler="/query",le="120"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_request_duration_seconds_sum{handler="/query"} 0.5220201759999993 +prometheus_http_request_duration_seconds_count{handler="/query"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.1"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.2"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.4"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="1"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="3"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="8"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="20"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="60"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="120"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_request_duration_seconds_sum{handler="/rules"} 2.776021043000005 +prometheus_http_request_duration_seconds_count{handler="/rules"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.2"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.4"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="3"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="8"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="20"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="60"} 20 
+prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="120"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_request_duration_seconds_sum{handler="/service-discovery"} 0.004057062 +prometheus_http_request_duration_seconds_count{handler="/service-discovery"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.2"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.4"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="3"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="8"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="20"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="60"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="120"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_request_duration_seconds_sum{handler="/status"} 0.010107473 +prometheus_http_request_duration_seconds_count{handler="/status"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.2"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.4"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="3"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="8"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="20"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="60"} 39 
+prometheus_http_request_duration_seconds_bucket{handler="/targets",le="120"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_request_duration_seconds_sum{handler="/targets"} 0.009001180000000001 +prometheus_http_request_duration_seconds_count{handler="/targets"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/tsdb-status"} 0.018204165 +prometheus_http_request_duration_seconds_count{handler="/tsdb-status"} 49 +prometheus_http_requests_total{code="200",handler="/"} 0 +prometheus_http_requests_total{code="200",handler="/-/healthy"} 29524 +prometheus_http_requests_total{code="200",handler="/-/quit"} 0 +prometheus_http_requests_total{code="200",handler="/-/ready"} 49 +prometheus_http_requests_total{code="200",handler="/-/reload"} 0 +prometheus_http_requests_total{code="200",handler="/alertmanager-discovery"} 0 +prometheus_http_requests_total{code="200",handler="/alerts"} 48 +prometheus_http_requests_total{code="200",handler="/api/v1/*path"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/clean_tombstones"} 0 
+prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/delete_series"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/snapshot"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/alertmanagers"} 8 +prometheus_http_requests_total{code="200",handler="/api/v1/alerts"} 14635 +prometheus_http_requests_total{code="200",handler="/api/v1/format_query"} 3 +prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 14679 +prometheus_http_requests_total{code="200",handler="/api/v1/labels"} 4915 +prometheus_http_requests_total{code="200",handler="/api/v1/metadata"} 2093 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications"} 12 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications/live"} 4529 +prometheus_http_requests_total{code="200",handler="/api/v1/otlp/v1/metrics"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/parse_query"} 247 +prometheus_http_requests_total{code="200",handler="/api/v1/query"} 326411 +prometheus_http_requests_total{code="200",handler="/api/v1/query_exemplars"} 841 +prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 1.183734e+06 +prometheus_http_requests_total{code="200",handler="/api/v1/read"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/rules"} 88658 +prometheus_http_requests_total{code="200",handler="/api/v1/scrape_pools"} 142 +prometheus_http_requests_total{code="200",handler="/api/v1/series"} 1235 +prometheus_http_requests_total{code="200",handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_requests_total{code="200",handler="/api/v1/status/config"} 84722 +prometheus_http_requests_total{code="200",handler="/api/v1/status/flags"} 32 +prometheus_http_requests_total{code="200",handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_requests_total{code="200",handler="/api/v1/status/tsdb"} 94 +prometheus_http_requests_total{code="200",handler="/api/v1/status/walreplay"} 49 
+prometheus_http_requests_total{code="200",handler="/api/v1/targets"} 191 +prometheus_http_requests_total{code="200",handler="/api/v1/targets/metadata"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/write"} 0 +prometheus_http_requests_total{code="200",handler="/assets/*filepath"} 2213 +prometheus_http_requests_total{code="200",handler="/classic/static/*filepath"} 0 +prometheus_http_requests_total{code="200",handler="/config"} 13 +prometheus_http_requests_total{code="200",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="200",handler="/debug/*subpath"} 3 +prometheus_http_requests_total{code="200",handler="/favicon.ico"} 0 +prometheus_http_requests_total{code="200",handler="/favicon.svg"} 769 +prometheus_http_requests_total{code="200",handler="/federate"} 4 +prometheus_http_requests_total{code="200",handler="/flags"} 9 +prometheus_http_requests_total{code="200",handler="/graph"} 0 +prometheus_http_requests_total{code="200",handler="/manifest.json"} 0 +prometheus_http_requests_total{code="200",handler="/metrics"} 4.059092e+06 +prometheus_http_requests_total{code="200",handler="/query"} 1774 +prometheus_http_requests_total{code="200",handler="/rules"} 8673 +prometheus_http_requests_total{code="200",handler="/service-discovery"} 20 +prometheus_http_requests_total{code="200",handler="/status"} 46 +prometheus_http_requests_total{code="200",handler="/targets"} 39 +prometheus_http_requests_total{code="200",handler="/tsdb-status"} 49 +prometheus_http_requests_total{code="200",handler="/version"} 0 +prometheus_http_requests_total{code="204",handler="/api/v1/*path"} 27 +prometheus_http_requests_total{code="204",handler="/api/v1/notifications/live"} 5 +prometheus_http_requests_total{code="301",handler="/debug/*subpath"} 1 +prometheus_http_requests_total{code="302",handler="/"} 688 +prometheus_http_requests_total{code="302",handler="/graph"} 751 +prometheus_http_requests_total{code="400",handler="/api/v1/format_query"} 1 
+prometheus_http_requests_total{code="400",handler="/api/v1/label/:name/values"} 3292 +prometheus_http_requests_total{code="400",handler="/api/v1/labels"} 6 +prometheus_http_requests_total{code="400",handler="/api/v1/parse_query"} 18 +prometheus_http_requests_total{code="400",handler="/api/v1/query"} 155 +prometheus_http_requests_total{code="400",handler="/api/v1/query_range"} 263 +prometheus_http_requests_total{code="400",handler="/api/v1/rules"} 4 +prometheus_http_requests_total{code="400",handler="/api/v1/series"} 11 +prometheus_http_requests_total{code="400",handler="/api/v1/targets/metadata"} 2 +prometheus_http_requests_total{code="404",handler="/assets/*filepath"} 12 +prometheus_http_requests_total{code="404",handler="/classic/static/*filepath"} 103 +prometheus_http_requests_total{code="404",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="404",handler="/favicon.ico"} 177 +prometheus_http_requests_total{code="416",handler="/favicon.svg"} 1 +prometheus_http_requests_total{code="422",handler="/api/v1/query"} 114 +prometheus_http_requests_total{code="422",handler="/api/v1/query_range"} 31 +prometheus_http_requests_total{code="499",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="499",handler="/api/v1/query_range"} 20 +prometheus_http_requests_total{code="503",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="503",handler="/api/v1/query_range"} 4 +prometheus_http_requests_total{code="503",handler="/api/v1/status/config"} 1 +prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 688 
+prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 688 +prometheus_http_response_size_bytes_sum{handler="/"} 19952 +prometheus_http_response_size_bytes_count{handler="/"} 688 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 885720 +prometheus_http_response_size_bytes_count{handler="/-/healthy"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/-/ready"} 1372 
+prometheus_http_response_size_bytes_count{handler="/-/ready"} 49 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="10000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+06"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+07"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+08"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+09"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_response_size_bytes_sum{handler="/alerts"} 84096 +prometheus_http_response_size_bytes_count{handler="/alerts"} 48 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="10000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+06"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+07"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+08"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+09"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_response_size_bytes_sum{handler="/api/v1/*path"} 0 +prometheus_http_response_size_bytes_count{handler="/api/v1/*path"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1000"} 8 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="10000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+06"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+07"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+08"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+09"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alertmanagers"} 1064 +prometheus_http_response_size_bytes_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="10000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+06"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+07"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+08"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+09"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alerts"} 1.0260926e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/alerts"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="10000"} 4 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/api/v1/format_query"} 495 +prometheus_http_response_size_bytes_count{handler="/api/v1/format_query"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 8167 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 15939 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 17901 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 2.0009454e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100"} 102 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1000"} 4908 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="10000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+06"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+07"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+08"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+09"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_response_size_bytes_sum{handler="/api/v1/labels"} 3.16046e+06 +prometheus_http_response_size_bytes_count{handler="/api/v1/labels"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100"} 170 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="10000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100000"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+06"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+07"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+08"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+09"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_response_size_bytes_sum{handler="/api/v1/metadata"} 1.5950362e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/metadata"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="10000"} 12 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+06"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+07"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+08"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+09"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications"} 360 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100"} 4529 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="10000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+06"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+07"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+08"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+09"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="+Inf"} 4534 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications/live"} 1365 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications/live"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1000"} 265 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="10000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+06"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+07"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+08"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+09"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_response_size_bytes_sum{handler="/api/v1/parse_query"} 67620 +prometheus_http_response_size_bytes_count{handler="/api/v1/parse_query"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 25147 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 325005 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 325143 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 326635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 326692 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 326694 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 2.34481756e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="10000"} 841 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+06"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+07"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+08"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+09"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_exemplars"} 50460 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 69552 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 917165 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 1.152103e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 1.183391e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 1.183621e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 1.184052e+06 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 1.869092376e+09 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1000"} 0 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="10000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+06"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+07"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+08"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+09"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_response_size_bytes_sum{handler="/api/v1/read"} 50800 +prometheus_http_response_size_bytes_count{handler="/api/v1/read"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="10000"} 88656 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100000"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+06"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+07"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+08"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+09"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_response_size_bytes_sum{handler="/api/v1/rules"} 7.01925022e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/rules"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="10000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100000"} 142 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+06"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+07"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+08"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+09"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_response_size_bytes_sum{handler="/api/v1/scrape_pools"} 18318 +prometheus_http_response_size_bytes_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100"} 1011 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1000"} 1233 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="10000"} 1244 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100000"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+06"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+07"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+08"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+09"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_response_size_bytes_sum{handler="/api/v1/series"} 235829 +prometheus_http_response_size_bytes_count{handler="/api/v1/series"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="10000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100000"} 4413 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+06"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+07"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+08"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+09"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/buildinfo"} 807579 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="10000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+06"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+07"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+08"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+09"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/config"} 6.3710963e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/config"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="10000"} 32 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+06"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+07"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+08"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+09"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/flags"} 31968 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/flags"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="10000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+06"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+07"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+08"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+09"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/runtimeinfo"} 240400 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1000"} 92 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="10000"} 94 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+06"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+07"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+08"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+09"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/tsdb"} 66441 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/walreplay"} 3381 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1000"} 184 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="10000"} 191 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+06"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+07"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+08"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+09"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets"} 191150 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="10000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+06"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+07"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+08"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+09"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets/metadata"} 2736 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="10000"} 13 
+prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+06"} 958 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+07"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+08"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+09"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="+Inf"} 2225 +prometheus_http_response_size_bytes_sum{handler="/assets/*filepath"} 3.525958804e+09 +prometheus_http_response_size_bytes_count{handler="/assets/*filepath"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="10000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+06"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+07"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+08"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+09"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_response_size_bytes_sum{handler="/classic/static/*filepath"} 1957 +prometheus_http_response_size_bytes_count{handler="/classic/static/*filepath"} 103 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="10000"} 13 
+prometheus_http_response_size_bytes_bucket{handler="/config",le="100000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+06"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+07"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+08"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+09"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_response_size_bytes_sum{handler="/config"} 22776 +prometheus_http_response_size_bytes_count{handler="/config"} 13 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1000"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="10000"} 31 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100000"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+06"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+07"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+08"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+09"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_response_size_bytes_sum{handler="/consoles/*filepath"} 175556 +prometheus_http_response_size_bytes_count{handler="/consoles/*filepath"} 34 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="10000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+06"} 4 
+prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/debug/*subpath"} 7062 +prometheus_http_response_size_bytes_count{handler="/debug/*subpath"} 4 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="10000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+06"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+07"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+08"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+09"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_response_size_bytes_sum{handler="/favicon.ico"} 3363 +prometheus_http_response_size_bytes_count{handler="/favicon.ico"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="10000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+06"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+07"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+08"} 770 
+prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+09"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_response_size_bytes_sum{handler="/favicon.svg"} 2.135541e+06 +prometheus_http_response_size_bytes_count{handler="/favicon.svg"} 770 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="10000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/federate"} 28439 +prometheus_http_response_size_bytes_count{handler="/federate"} 4 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="10000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+06"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+07"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+08"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+09"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_response_size_bytes_sum{handler="/flags"} 15768 +prometheus_http_response_size_bytes_count{handler="/flags"} 9 
+prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 130 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 744 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_response_size_bytes_sum{handler="/graph"} 164775 +prometheus_http_response_size_bytes_count{handler="/graph"} 751 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 4.059088e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_response_size_bytes_sum{handler="/metrics"} 8.2515439707e+10 +prometheus_http_response_size_bytes_count{handler="/metrics"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="10000"} 1774 
+prometheus_http_response_size_bytes_bucket{handler="/query",le="100000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+06"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+07"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+08"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+09"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_response_size_bytes_sum{handler="/query"} 3.108048e+06 +prometheus_http_response_size_bytes_count{handler="/query"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="10000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+06"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+07"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+08"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+09"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_response_size_bytes_sum{handler="/rules"} 1.5195096e+07 +prometheus_http_response_size_bytes_count{handler="/rules"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="10000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+06"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+07"} 20 
+prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+08"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+09"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_response_size_bytes_sum{handler="/service-discovery"} 35040 +prometheus_http_response_size_bytes_count{handler="/service-discovery"} 20 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="10000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+06"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+07"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+08"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+09"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_response_size_bytes_sum{handler="/status"} 80592 +prometheus_http_response_size_bytes_count{handler="/status"} 46 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="10000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+06"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+07"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+08"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+09"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_response_size_bytes_sum{handler="/targets"} 68328 
+prometheus_http_response_size_bytes_count{handler="/targets"} 39 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/tsdb-status"} 85848 +prometheus_http_response_size_bytes_count{handler="/tsdb-status"} 49 +prometheus_notifications_alertmanagers_discovered 1 +prometheus_notifications_dropped_total 0 +prometheus_notifications_errors_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 0 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.5"} 0.001566044 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.9"} 0.003927931 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.99"} 0.013928135 +prometheus_notifications_latency_seconds_sum{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 194.15032606200046 +prometheus_notifications_latency_seconds_count{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 75180 +prometheus_notifications_queue_capacity 10000 +prometheus_notifications_queue_length 0 +prometheus_notifications_sent_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 141616 
+prometheus_ready 1 +prometheus_remote_storage_exemplars_in_total 4.959738e+06 +prometheus_remote_storage_highest_timestamp_in_seconds 1.738949375e+09 +prometheus_remote_storage_histograms_in_total 0 +prometheus_remote_storage_samples_in_total 6.00447296e+08 +prometheus_remote_storage_string_interner_zero_reference_releases_total 0 +prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.000214623 +prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.001456135 +prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 0.008111814 +prometheus_rule_evaluation_duration_seconds_sum 5209.704794862625 +prometheus_rule_evaluation_duration_seconds_count 7.203456e+06 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 118092 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 118090 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 
1.4761e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.476125e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 649495 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 649484 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.358035e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.358035e+06 +prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000735928 +prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000818857 +prometheus_rule_group_duration_seconds{quantile="0.5"} 0.004852081 +prometheus_rule_group_duration_seconds{quantile="0.9"} 0.022897759 +prometheus_rule_group_duration_seconds{quantile="0.99"} 0.069327797 +prometheus_rule_group_duration_seconds_sum 5335.451440133042 +prometheus_rule_group_duration_seconds_count 472359 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 15 
+prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 59046 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 59045 
+prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000754015 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001104624 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.022040842 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.048928087 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.002766703 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002218466 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.010245447 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009232393 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 
+prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 1.738949373753179e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 1.738949366900781e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.7389493632087085e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.7389493666743164e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 1.7389493675395968e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1.7389493623381927e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.738949366856423e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.738949369917499e+09 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.002032625 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 9.1853e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.000166088 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.000108127 
+prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 6.408e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 2.621e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 8.4979e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.000104444 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000685176 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001035099 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.021769662 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.042017156 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.00260535 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002069356 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.009905481 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009056893 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 25 
+prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 23 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 23 +prometheus_sd_azure_cache_hit_total 0 +prometheus_sd_azure_failures_total 0 +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_failures_total 0 +prometheus_sd_discovered_targets{config="alertmanager",name="scrape"} 1 +prometheus_sd_discovered_targets{config="blackbox",name="scrape"} 1 +prometheus_sd_discovered_targets{config="caddy",name="scrape"} 1 +prometheus_sd_discovered_targets{config="cadvisor",name="scrape"} 1 +prometheus_sd_discovered_targets{config="config-0",name="notify"} 1 +prometheus_sd_discovered_targets{config="grafana",name="scrape"} 1 
+prometheus_sd_discovered_targets{config="node",name="scrape"} 1 +prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1 +prometheus_sd_discovered_targets{config="random",name="scrape"} 4 +prometheus_sd_dns_lookup_failures_total 0 +prometheus_sd_dns_lookups_total 0 +prometheus_sd_failed_configs{name="notify"} 0 +prometheus_sd_failed_configs{name="scrape"} 0 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/alertmanager.yml"} 1.701295557e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/cadvisor.yml"} 1.705433682e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/node.yml"} 1.701295553e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/random.yml"} 1.579263729e+09 +prometheus_sd_file_read_errors_total 0 +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} 9.5384e-05 +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} 0.000275679 +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} 0.000275679 +prometheus_sd_file_scan_duration_seconds_sum 4.751347856999997 +prometheus_sd_file_scan_duration_seconds_count 11812 +prometheus_sd_file_watcher_errors_total 0 +prometheus_sd_http_failures_total 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 
+prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +prometheus_sd_kubernetes_failures_total 0 +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.5"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.9"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.99"} NaN +prometheus_sd_kuma_fetch_duration_seconds_sum 0 +prometheus_sd_kuma_fetch_duration_seconds_count 0 +prometheus_sd_kuma_fetch_failures_total 0 +prometheus_sd_kuma_fetch_skipped_updates_total 0 +prometheus_sd_linode_failures_total 0 +prometheus_sd_nomad_failures_total 0 +prometheus_sd_received_updates_total{name="notify"} 2 +prometheus_sd_received_updates_total{name="scrape"} 11820 +prometheus_sd_updates_delayed_total{name="notify"} 0 +prometheus_sd_updates_delayed_total{name="scrape"} 0 +prometheus_sd_updates_total{name="notify"} 1 +prometheus_sd_updates_total{name="scrape"} 2953 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14.982591232 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14.997567414 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 14.999977915 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15.000793403 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15.017607167 +prometheus_target_interval_length_seconds_sum{interval="15s"} 9.742237453453667e+06 +prometheus_target_interval_length_seconds_count{interval="15s"} 649482 +prometheus_target_metadata_cache_bytes{scrape_job="alertmanager"} 6817 
+prometheus_target_metadata_cache_bytes{scrape_job="blackbox"} 661 +prometheus_target_metadata_cache_bytes{scrape_job="caddy"} 2365 +prometheus_target_metadata_cache_bytes{scrape_job="cadvisor"} 4547 +prometheus_target_metadata_cache_bytes{scrape_job="grafana"} 37608 +prometheus_target_metadata_cache_bytes{scrape_job="node"} 13900 +prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 20265 +prometheus_target_metadata_cache_bytes{scrape_job="random"} 792 +prometheus_target_metadata_cache_entries{scrape_job="alertmanager"} 86 +prometheus_target_metadata_cache_entries{scrape_job="blackbox"} 13 +prometheus_target_metadata_cache_entries{scrape_job="caddy"} 47 +prometheus_target_metadata_cache_entries{scrape_job="cadvisor"} 93 +prometheus_target_metadata_cache_entries{scrape_job="grafana"} 373 +prometheus_target_metadata_cache_entries{scrape_job="node"} 293 +prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 237 +prometheus_target_metadata_cache_entries{scrape_job="random"} 16 +prometheus_target_scrape_pool_exceeded_label_limits_total 0 +prometheus_target_scrape_pool_exceeded_target_limit_total 0 +prometheus_target_scrape_pool_reloads_failed_total 0 +prometheus_target_scrape_pool_reloads_total 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="node"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="random"} 0 +prometheus_target_scrape_pool_sync_total{scrape_job="alertmanager"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="blackbox"} 2953 
+prometheus_target_scrape_pool_sync_total{scrape_job="caddy"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="cadvisor"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="grafana"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="node"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="random"} 2953 +prometheus_target_scrape_pool_target_limit{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="node"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="random"} 0 +prometheus_target_scrape_pool_targets{scrape_job="alertmanager"} 1 +prometheus_target_scrape_pool_targets{scrape_job="blackbox"} 1 +prometheus_target_scrape_pool_targets{scrape_job="caddy"} 1 +prometheus_target_scrape_pool_targets{scrape_job="cadvisor"} 1 +prometheus_target_scrape_pool_targets{scrape_job="grafana"} 1 +prometheus_target_scrape_pool_targets{scrape_job="node"} 1 +prometheus_target_scrape_pool_targets{scrape_job="prometheus"} 1 +prometheus_target_scrape_pool_targets{scrape_job="random"} 4 +prometheus_target_scrape_pools_failed_total 0 +prometheus_target_scrape_pools_total 8 +prometheus_target_scrapes_cache_flush_forced_total 0 +prometheus_target_scrapes_exceeded_body_size_limit_total 0 +prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total 0 +prometheus_target_scrapes_exceeded_sample_limit_total 0 +prometheus_target_scrapes_exemplar_out_of_order_total 0 +prometheus_target_scrapes_sample_duplicate_timestamp_total 0 +prometheus_target_scrapes_sample_out_of_bounds_total 0 
+prometheus_target_scrapes_sample_out_of_order_total 455 +prometheus_target_sync_failed_total{scrape_job="alertmanager"} 0 +prometheus_target_sync_failed_total{scrape_job="blackbox"} 0 +prometheus_target_sync_failed_total{scrape_job="caddy"} 0 +prometheus_target_sync_failed_total{scrape_job="cadvisor"} 0 +prometheus_target_sync_failed_total{scrape_job="grafana"} 0 +prometheus_target_sync_failed_total{scrape_job="node"} 0 +prometheus_target_sync_failed_total{scrape_job="prometheus"} 0 +prometheus_target_sync_failed_total{scrape_job="random"} 0 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.01"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.05"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.5"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.9"} 0.000141485 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.99"} 0.000141485 +prometheus_target_sync_length_seconds_sum{scrape_job="alertmanager"} 0.13103036 +prometheus_target_sync_length_seconds_count{scrape_job="alertmanager"} 2953 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.01"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.05"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.5"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.9"} 6.2134e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.99"} 6.2134e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="blackbox"} 0.6044201539999996 +prometheus_target_sync_length_seconds_count{scrape_job="blackbox"} 2953 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.01"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.05"} 1.3759e-05 
+prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.5"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.9"} 7.8256e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.99"} 7.8256e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="caddy"} 0.10369844599999971 +prometheus_target_sync_length_seconds_count{scrape_job="caddy"} 2953 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.01"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.05"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.5"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.9"} 4.2337e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.99"} 4.2337e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="cadvisor"} 0.10489659999999998 +prometheus_target_sync_length_seconds_count{scrape_job="cadvisor"} 2953 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.01"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.05"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.5"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.9"} 1.7284e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.99"} 1.7284e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="grafana"} 0.09031192700000017 +prometheus_target_sync_length_seconds_count{scrape_job="grafana"} 2953 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.01"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.05"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.5"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.9"} 7.416e-05 
+prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.99"} 7.416e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="node"} 0.11539821299999993 +prometheus_target_sync_length_seconds_count{scrape_job="node"} 2953 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 1.961e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 1.961e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.10655758600000016 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 2953 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.01"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.05"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.5"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.9"} 4.8586e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.99"} 4.8586e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="random"} 0.20406449899999993 +prometheus_target_sync_length_seconds_count{scrape_job="random"} 2953 +prometheus_template_text_expansion_failures_total 0 +prometheus_template_text_expansions_total 2.126277e+06 +prometheus_treecache_watcher_goroutines 0 +prometheus_treecache_zookeeper_failures_total 0 +prometheus_tsdb_blocks_loaded 17 +prometheus_tsdb_checkpoint_creations_failed_total 0 +prometheus_tsdb_checkpoint_creations_total 62 +prometheus_tsdb_checkpoint_deletions_failed_total 0 +prometheus_tsdb_checkpoint_deletions_total 62 +prometheus_tsdb_clean_start 1 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 86 
+prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 676 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 1555 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 2379 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 34068 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 4.819758e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_sum 8.917524773366e+12 +prometheus_tsdb_compaction_chunk_range_seconds_count 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 1407 +prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 1544 +prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 1881 +prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 2065 +prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 2782 +prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 4342 +prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 6180 +prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 11518 +prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 13890 +prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 4.810155e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 4.86058e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_sum 5.86000708e+08 +prometheus_tsdb_compaction_chunk_samples_count 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 1233 
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 156238 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2.006456e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3.568405e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 3.835144e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 4.034591e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 4.505646e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 4.69694e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 4.78551e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_sum 6.81852972e+08 +prometheus_tsdb_compaction_chunk_size_bytes_count 4.861956e+06 +prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 61 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 155 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 180 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="1024"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2048"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4096"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8192"} 183 
+prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 183 +prometheus_tsdb_compaction_duration_seconds_sum 254.9095696899999 +prometheus_tsdb_compaction_duration_seconds_count 183 +prometheus_tsdb_compaction_populating_block 0 +prometheus_tsdb_compactions_failed_total 0 +prometheus_tsdb_compactions_skipped_total 0 +prometheus_tsdb_compactions_total 183 +prometheus_tsdb_compactions_triggered_total 14855 +prometheus_tsdb_data_replay_duration_seconds 5.550163617 +prometheus_tsdb_exemplar_exemplars_appended_total 0 +prometheus_tsdb_exemplar_exemplars_in_storage 0 +prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds 0 +prometheus_tsdb_exemplar_max_exemplars 0 +prometheus_tsdb_exemplar_out_of_order_exemplars_total 0 +prometheus_tsdb_exemplar_series_with_exemplars_in_storage 0 +prometheus_tsdb_head_active_appenders 0 +prometheus_tsdb_head_chunks 31476 +prometheus_tsdb_head_chunks_created_total 4.893432e+06 +prometheus_tsdb_head_chunks_removed_total 4.861956e+06 +prometheus_tsdb_head_chunks_storage_size_bytes 7.237299e+06 +prometheus_tsdb_head_gc_duration_seconds_sum 4.773801686000001 +prometheus_tsdb_head_gc_duration_seconds_count 123 +prometheus_tsdb_head_max_time 1.738949375191e+12 +prometheus_tsdb_head_max_time_seconds 1.738949375e+09 +prometheus_tsdb_head_min_time 1.738944000171e+12 +prometheus_tsdb_head_min_time_seconds 1.738944e+09 +prometheus_tsdb_head_out_of_order_samples_appended_total{type="float"} 0 +prometheus_tsdb_head_out_of_order_samples_appended_total{type="histogram"} 0 +prometheus_tsdb_head_samples_appended_total{type="float"} 5.85543187e+08 +prometheus_tsdb_head_samples_appended_total{type="histogram"} 0 +prometheus_tsdb_head_series 10720 +prometheus_tsdb_head_series_created_total 18541 +prometheus_tsdb_head_series_not_found_total 0 +prometheus_tsdb_head_series_removed_total 7821 +prometheus_tsdb_head_truncations_failed_total 0 +prometheus_tsdb_head_truncations_total 123 +prometheus_tsdb_isolation_high_watermark 7.852949e+06 
+prometheus_tsdb_isolation_low_watermark 7.852949e+06 +prometheus_tsdb_lowest_timestamp 1.73618640004e+12 +prometheus_tsdb_lowest_timestamp_seconds 1.7361864e+09 +prometheus_tsdb_mmap_chunk_corruptions_total 0 +prometheus_tsdb_mmap_chunks_total 4.851264e+06 +prometheus_tsdb_out_of_bound_samples_total{type="float"} 0 +prometheus_tsdb_out_of_order_samples_total{type="float"} 517 +prometheus_tsdb_out_of_order_samples_total{type="histogram"} 0 +prometheus_tsdb_reloads_failures_total 0 +prometheus_tsdb_reloads_total 14822 +prometheus_tsdb_retention_limit_bytes 0 +prometheus_tsdb_retention_limit_seconds 2.6784e+06 +prometheus_tsdb_size_retentions_total 0 +prometheus_tsdb_snapshot_replay_error_total 0 +prometheus_tsdb_storage_blocks_bytes 2.762863592e+09 +prometheus_tsdb_symbol_table_size_bytes 10616 +prometheus_tsdb_time_retentions_total 5 +prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0 +prometheus_tsdb_tombstone_cleanup_seconds_sum 0 +prometheus_tsdb_tombstone_cleanup_seconds_count 0 +prometheus_tsdb_too_old_samples_total{type="float"} 0 +prometheus_tsdb_vertical_compactions_total 0 +prometheus_tsdb_wal_completed_pages_total 109271 +prometheus_tsdb_wal_corruptions_total 0 +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN +prometheus_tsdb_wal_fsync_duration_seconds_sum 4.842524568000002 +prometheus_tsdb_wal_fsync_duration_seconds_count 123 +prometheus_tsdb_wal_page_flushes_total 2.293951e+06 +prometheus_tsdb_wal_segment_current 24726 +prometheus_tsdb_wal_storage_size_bytes 6.9168385e+07 +prometheus_tsdb_wal_truncate_duration_seconds_sum 121.61954577099996 +prometheus_tsdb_wal_truncate_duration_seconds_count 62 +prometheus_tsdb_wal_truncations_failed_total 0 +prometheus_tsdb_wal_truncations_total 62 +prometheus_tsdb_wal_writes_failed_total 0 +prometheus_web_federation_errors_total 0 
+prometheus_web_federation_warnings_total 0 +promhttp_metric_handler_requests_in_flight 1 +promhttp_metric_handler_requests_total{code="200"} 4.059092e+06 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# EOF diff --git a/model/textparse/testdata/alltypes.237mfs.prom.txt b/model/textparse/testdata/alltypes.237mfs.prom.txt new file mode 100644 index 0000000000..01138055cf --- /dev/null +++ b/model/textparse/testdata/alltypes.237mfs.prom.txt @@ -0,0 +1,2332 @@ +# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. Sourced from /gc/cycles/automatic:gc-cycles +# TYPE go_gc_cycles_automatic_gc_cycles_total counter +go_gc_cycles_automatic_gc_cycles_total 190932 +# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. Sourced from /gc/cycles/forced:gc-cycles +# TYPE go_gc_cycles_forced_gc_cycles_total counter +go_gc_cycles_forced_gc_cycles_total 0 +# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. Sourced from /gc/cycles/total:gc-cycles +# TYPE go_gc_cycles_total_gc_cycles_total counter +go_gc_cycles_total_gc_cycles_total 190932 +# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 7.6238e-05 +go_gc_duration_seconds{quantile="0.25"} 0.00010188 +go_gc_duration_seconds{quantile="0.5"} 0.000135819 +go_gc_duration_seconds{quantile="0.75"} 0.000156061 +go_gc_duration_seconds{quantile="1"} 0.002389378 +go_gc_duration_seconds_sum 44.985611511 +go_gc_duration_seconds_count 190932 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. 
Sourced from /gc/gogc:percent +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 75 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.03676723e+08 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/allocs-by-size:bytes +# TYPE go_gc_heap_allocs_by_size_bytes histogram +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 2.279966416e+09 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 1.9429106442e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 3.2158220609e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 4.2309744198e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 4.3418919481e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 4.374631622e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 4.3917578245e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 4.396605609e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 4.4007501305e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 4.4020325917e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 4.4036187548e+10 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 4.4046288803e+10 +go_gc_heap_allocs_by_size_bytes_sum 5.937322571024e+12 +go_gc_heap_allocs_by_size_bytes_count 4.4046288803e+10 +# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. 
Sourced from /gc/heap/allocs:bytes +# TYPE go_gc_heap_allocs_bytes_total counter +go_gc_heap_allocs_bytes_total 5.937322571024e+12 +# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/allocs:objects +# TYPE go_gc_heap_allocs_objects_total counter +go_gc_heap_allocs_objects_total 4.4046288803e+10 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/frees-by-size:bytes +# TYPE go_gc_heap_frees_by_size_bytes histogram +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 2.279951045e+09 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 1.9428965693e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 3.2157849717e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 4.2309204178e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 4.3418348856e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 4.3745739652e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 4.3916999773e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 4.3965477112e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 4.4006921621e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 4.4019746017e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 4.4035607466e+10 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 4.4045708586e+10 +go_gc_heap_frees_by_size_bytes_sum 5.937239047736e+12 +go_gc_heap_frees_by_size_bytes_count 4.4045708586e+10 +# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. 
Sourced from /gc/heap/frees:bytes +# TYPE go_gc_heap_frees_bytes_total counter +go_gc_heap_frees_bytes_total 5.937239047736e+12 +# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. Sourced from /gc/heap/frees:objects +# TYPE go_gc_heap_frees_objects_total counter +go_gc_heap_frees_objects_total 4.4045708586e+10 +# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. Sourced from /gc/heap/goal:bytes +# TYPE go_gc_heap_goal_bytes gauge +go_gc_heap_goal_bytes 1.0365948e+08 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. Sourced from /gc/heap/live:bytes +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 5.8791024e+07 +# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. Sourced from /gc/heap/objects:objects +# TYPE go_gc_heap_objects_objects gauge +go_gc_heap_objects_objects 580217 +# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. Sourced from /gc/heap/tiny/allocs:objects +# TYPE go_gc_heap_tiny_allocs_objects_total counter +go_gc_heap_tiny_allocs_objects_total 8.306064403e+09 +# HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. 
Sourced from /gc/limiter/last-enabled:gc-cycle +# TYPE go_gc_limiter_last_enabled_gc_cycle gauge +go_gc_limiter_last_enabled_gc_cycle 160896 +# HELP go_gc_pauses_seconds Deprecated. Prefer the identical /sched/pauses/total/gc:seconds. Sourced from /gc/pauses:seconds +# TYPE go_gc_pauses_seconds histogram +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 376121 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 381798 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 381863 +go_gc_pauses_seconds_bucket{le="+Inf"} 381864 +go_gc_pauses_seconds_sum 20.343611904000003 +go_gc_pauses_seconds_count 381864 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. Sourced from /gc/scan/globals:bytes +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 555824 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. Sourced from /gc/scan/heap:bytes +# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 4.0986192e+07 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. Sourced from /gc/scan/stack:bytes +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 477760 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. Sourced from /gc/scan/total:bytes +# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 4.2019776e+07 +# HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. Sourced from /gc/stack/starting-size:bytes +# TYPE go_gc_stack_starting_size_bytes gauge +go_gc_stack_starting_size_bytes 4096 +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines 151 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.23.4"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.3523288e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 5.937322571024e+12 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 9.35076e+06 +# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 5.2351772989e+10 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 5.450544e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 8.3523288e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 1.23691008e+08 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. 
Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 9.56416e+07 +# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 580217 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 1.05897984e+08 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 2.19332608e+08 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.738949372347471e+09 +# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 5.2352353206e+10 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 1200 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 1.21232e+06 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 2.13792e+06 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.0365948e+08 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 803696 +# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 2.818048e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 2.818048e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 2.39909176e+08 +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads +# TYPE go_sched_gomaxprocs_threads gauge +go_sched_gomaxprocs_threads 1 +# HELP go_sched_goroutines_goroutines Count of live goroutines. 
Sourced from /sched/goroutines:goroutines +# TYPE go_sched_goroutines_goroutines gauge +go_sched_goroutines_goroutines 151 +# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. Sourced from /sched/latencies:seconds +# TYPE go_sched_latencies_seconds histogram +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 5.0101035e+07 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 7.4291762e+07 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 8.1386236e+07 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 8.2116161e+07 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 8.2802009e+07 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 8.6274195e+07 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 8.6724074e+07 +go_sched_latencies_seconds_bucket{le="+Inf"} 8.6726596e+07 +go_sched_latencies_seconds_sum 8266.758178496 +go_sched_latencies_seconds_count 8.6726596e+07 +# HELP go_sched_pauses_stopping_gc_seconds Distribution of individual GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. 
Sourced from /sched/pauses/stopping/gc:seconds +# TYPE go_sched_pauses_stopping_gc_seconds histogram +go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_stopping_gc_seconds_bucket{le="6.399999999999999e-07"} 236272 +go_sched_pauses_stopping_gc_seconds_bucket{le="7.167999999999999e-06"} 380799 +go_sched_pauses_stopping_gc_seconds_bucket{le="8.191999999999999e-05"} 381741 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.0009175039999999999"} 381807 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.010485759999999998"} 381862 +go_sched_pauses_stopping_gc_seconds_bucket{le="0.11744051199999998"} 381864 +go_sched_pauses_stopping_gc_seconds_bucket{le="+Inf"} 381864 +go_sched_pauses_stopping_gc_seconds_sum 0.191211904 +go_sched_pauses_stopping_gc_seconds_count 381864 +# HELP go_sched_pauses_stopping_other_seconds Distribution of individual non-GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total non-GC-related stop-the-world time (/sched/pauses/total/other:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. 
Sourced from /sched/pauses/stopping/other:seconds +# TYPE go_sched_pauses_stopping_other_seconds histogram +go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="7.167999999999999e-06"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="8.191999999999999e-05"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.0009175039999999999"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.010485759999999998"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="0.11744051199999998"} 0 +go_sched_pauses_stopping_other_seconds_bucket{le="+Inf"} 0 +go_sched_pauses_stopping_other_seconds_sum 0 +go_sched_pauses_stopping_other_seconds_count 0 +# HELP go_sched_pauses_total_gc_seconds Distribution of individual GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (this is measured directly in /sched/pauses/stopping/gc:seconds), during which some threads may still be running. Bucket counts increase monotonically. 
Sourced from /sched/pauses/total/gc:seconds +# TYPE go_sched_pauses_total_gc_seconds histogram +go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_total_gc_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_total_gc_seconds_bucket{le="7.167999999999999e-06"} 137212 +go_sched_pauses_total_gc_seconds_bucket{le="8.191999999999999e-05"} 208425 +go_sched_pauses_total_gc_seconds_bucket{le="0.0009175039999999999"} 376121 +go_sched_pauses_total_gc_seconds_bucket{le="0.010485759999999998"} 381798 +go_sched_pauses_total_gc_seconds_bucket{le="0.11744051199999998"} 381863 +go_sched_pauses_total_gc_seconds_bucket{le="+Inf"} 381864 +go_sched_pauses_total_gc_seconds_sum 20.343611904000003 +go_sched_pauses_total_gc_seconds_count 381864 +# HELP go_sched_pauses_total_other_seconds Distribution of individual non-GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (measured directly in /sched/pauses/stopping/other:seconds). Bucket counts increase monotonically. 
Sourced from /sched/pauses/total/other:seconds +# TYPE go_sched_pauses_total_other_seconds histogram +go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-08"} 0 +go_sched_pauses_total_other_seconds_bucket{le="6.399999999999999e-07"} 0 +go_sched_pauses_total_other_seconds_bucket{le="7.167999999999999e-06"} 0 +go_sched_pauses_total_other_seconds_bucket{le="8.191999999999999e-05"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.0009175039999999999"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.010485759999999998"} 0 +go_sched_pauses_total_other_seconds_bucket{le="0.11744051199999998"} 0 +go_sched_pauses_total_other_seconds_bucket{le="+Inf"} 0 +go_sched_pauses_total_other_seconds_sum 0 +go_sched_pauses_total_other_seconds_count 0 +# HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. Sourced from /sync/mutex/wait/total:seconds +# TYPE go_sync_mutex_wait_total_seconds_total counter +go_sync_mutex_wait_total_seconds_total 628.29966272 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 10 +# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name. 
+# TYPE net_conntrack_dialer_conn_attempted_total counter +net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 2 +net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="caddy"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="cadvisor"} 2 +net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_attempted_total{dialer_name="grafana"} 7 +net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 5 +net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 1 +net_conntrack_dialer_conn_attempted_total{dialer_name="random"} 4 +# HELP net_conntrack_dialer_conn_closed_total Total number of connections closed which originated from the dialer of a given name. +# TYPE net_conntrack_dialer_conn_closed_total counter +net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="blackbox"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="caddy"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="cadvisor"} 1 +net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="grafana"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="node"} 4 +net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 0 +net_conntrack_dialer_conn_closed_total{dialer_name="random"} 0 +# HELP net_conntrack_dialer_conn_established_total Total number of connections successfully established by the given dialer a given name. 
+# TYPE net_conntrack_dialer_conn_established_total counter +net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 2 +net_conntrack_dialer_conn_established_total{dialer_name="blackbox"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="caddy"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="cadvisor"} 2 +net_conntrack_dialer_conn_established_total{dialer_name="default"} 0 +net_conntrack_dialer_conn_established_total{dialer_name="grafana"} 5 +net_conntrack_dialer_conn_established_total{dialer_name="node"} 5 +net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 1 +net_conntrack_dialer_conn_established_total{dialer_name="random"} 4 +# HELP net_conntrack_dialer_conn_failed_total Total number of connections failed to dial by the dialer a given name. +# TYPE net_conntrack_dialer_conn_failed_total counter +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="blackbox",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="caddy",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="refused"} 0 
+net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="cadvisor",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="grafana",reason="unknown"} 2 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="node",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="refused"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="resolution"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="timeout"} 0 +net_conntrack_dialer_conn_failed_total{dialer_name="random",reason="unknown"} 0 +# HELP net_conntrack_listener_conn_accepted_total Total number of connections opened to the 
listener of a given name. +# TYPE net_conntrack_listener_conn_accepted_total counter +net_conntrack_listener_conn_accepted_total{listener_name="http"} 561771 +# HELP net_conntrack_listener_conn_closed_total Total number of connections closed that were made to the listener of a given name. +# TYPE net_conntrack_listener_conn_closed_total counter +net_conntrack_listener_conn_closed_total{listener_name="http"} 561723 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 96501.42 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 65000 +# HELP process_network_receive_bytes_total Number of bytes received by the process over the network. +# TYPE process_network_receive_bytes_total counter +process_network_receive_bytes_total 9.89686846606e+11 +# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network. +# TYPE process_network_transmit_bytes_total counter +process_network_transmit_bytes_total 3.743928187168e+12 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 124 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 1.72822528e+08 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.73806369023e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 4.608667648e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
+# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP prometheus_api_notification_active_subscribers The current number of active notification subscribers. +# TYPE prometheus_api_notification_active_subscribers gauge +prometheus_api_notification_active_subscribers 2 +# HELP prometheus_api_notification_updates_dropped_total Total number of notification updates dropped. +# TYPE prometheus_api_notification_updates_dropped_total counter +prometheus_api_notification_updates_dropped_total 0 +# HELP prometheus_api_notification_updates_sent_total Total number of notification updates sent. +# TYPE prometheus_api_notification_updates_sent_total counter +prometheus_api_notification_updates_sent_total 5 +# HELP prometheus_api_remote_read_queries The current number of remote read queries being executed or waiting. +# TYPE prometheus_api_remote_read_queries gauge +prometheus_api_remote_read_queries 0 +# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which prometheus was built, and the goos and goarch for the build. +# TYPE prometheus_build_info gauge +prometheus_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.23.4",revision="7086161a93b262aa0949dbf2aba15a5a7b13e0a3",tags="netgo,builtinassets,stringlabels",version="3.1.0"} 1 +# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. +# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge +prometheus_config_last_reload_success_timestamp_seconds 1.7380636976181264e+09 +# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. +# TYPE prometheus_config_last_reload_successful gauge +prometheus_config_last_reload_successful 1 +# HELP prometheus_engine_queries The current number of queries being executed or waiting. 
+# TYPE prometheus_engine_queries gauge +prometheus_engine_queries 0 +# HELP prometheus_engine_queries_concurrent_max The max number of concurrent queries. +# TYPE prometheus_engine_queries_concurrent_max gauge +prometheus_engine_queries_concurrent_max 20 +# HELP prometheus_engine_query_duration_seconds Query timings +# TYPE prometheus_engine_query_duration_seconds summary +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 8.075e-05 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.000917449 +prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.009315769 +prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 12506.67007997419 +prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 8.714071e+06 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 2.228e-05 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 6.2819e-05 +prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.000399637 +prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 490.326247638047 +prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 8.714071e+06 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 4.628e-06 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 1.6082e-05 +prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 4.1174e-05 +prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 8720.662071224393 +prometheus_engine_query_duration_seconds_count{slice="queue_time"} 1.7428526e+07 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 7.83e-07 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.994e-06 +prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 1.458e-05 
+prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 2.337879348999977 +prometheus_engine_query_duration_seconds_count{slice="result_sort"} 1.208154e+06 +# HELP prometheus_engine_query_log_enabled State of the query log. +# TYPE prometheus_engine_query_log_enabled gauge +prometheus_engine_query_log_enabled 0 +# HELP prometheus_engine_query_log_failures_total The number of query log failures. +# TYPE prometheus_engine_query_log_failures_total counter +prometheus_engine_query_log_failures_total 0 +# HELP prometheus_engine_query_samples_total The total number of samples loaded by all queries. +# TYPE prometheus_engine_query_samples_total counter +prometheus_engine_query_samples_total 9.1183747239e+10 +# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests. +# TYPE prometheus_http_request_duration_seconds histogram +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 688 +prometheus_http_request_duration_seconds_sum{handler="/"} 0.026826932000000022 +prometheus_http_request_duration_seconds_count{handler="/"} 688 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 29524 
+prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.7400570460000002 +prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 29524 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 0.005040123000000002 +prometheus_http_request_duration_seconds_count{handler="/-/ready"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.1"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.2"} 48 
+prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.4"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="1"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="3"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="8"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="20"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="60"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="120"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_request_duration_seconds_sum{handler="/alerts"} 0.011801602999999999 +prometheus_http_request_duration_seconds_count{handler="/alerts"} 48 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.1"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.2"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="0.4"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="1"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="3"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="8"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="20"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="60"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="120"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/*path"} 0.001724389 +prometheus_http_request_duration_seconds_count{handler="/api/v1/*path"} 27 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.1"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.2"} 8 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="0.4"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="1"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="3"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="8"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="20"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="60"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="120"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/alertmanagers"} 0.042492975999999995 +prometheus_http_request_duration_seconds_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.1"} 14630 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.2"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="0.4"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="1"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="3"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="8"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="20"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="60"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="120"} 14635 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/alerts"} 19.028669391999912 +prometheus_http_request_duration_seconds_count{handler="/api/v1/alerts"} 14635 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/format_query"} 0.023786675 +prometheus_http_request_duration_seconds_count{handler="/api/v1/format_query"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 17773 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 17860 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 17939 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 17970 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 17971 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 99.95326355699925 +prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.1"} 4905 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.2"} 4912 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.4"} 4916 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="1"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="3"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="8"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="20"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="60"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="120"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/labels"} 12.963155722000025 +prometheus_http_request_duration_seconds_count{handler="/api/v1/labels"} 4921 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.1"} 2073 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.2"} 2091 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="0.4"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="1"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="3"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="8"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="20"} 2093 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="60"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="120"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/metadata"} 16.90794258600001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/metadata"} 2093 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.2"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="0.4"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="1"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="3"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="8"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="20"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="60"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="120"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications"} 0.029363403 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications"} 12 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.1"} 3 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="0.4"} 11 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="1"} 29 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="3"} 79 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="8"} 116 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="20"} 187 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="60"} 224 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="120"} 234 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/notifications/live",le="+Inf"} 263 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/notifications/live"} 85150.78366682903 +prometheus_http_request_duration_seconds_count{handler="/api/v1/notifications/live"} 263 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.2"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="0.4"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="1"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="3"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="8"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="20"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="60"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="120"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/parse_query"} 0.7265021480000003 +prometheus_http_request_duration_seconds_count{handler="/api/v1/parse_query"} 265 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 3051 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 3082 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 3173 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 3213 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 3216 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 3217 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 70.23571750499987 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 3217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.1"} 831 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.2"} 839 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="0.4"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="1"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="3"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="8"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="20"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="60"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="120"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_exemplars"} 
3.6569132899999968 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 116338 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 116784 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 117924 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 118147 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 118155 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 118157 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 832.1240439999854 +prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 118157 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.2"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="0.4"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="1"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="3"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="8"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="20"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="60"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="120"} 16 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/read"} 0.07308876400000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/read"} 16 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.1"} 88415 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.2"} 88627 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.4"} 88659 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="1"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="3"} 88661 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="8"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="20"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="60"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="120"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/rules"} 629.5237672670012 +prometheus_http_request_duration_seconds_count{handler="/api/v1/rules"} 88662 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.2"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="0.4"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="1"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="3"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="8"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="20"} 142 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="60"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="120"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/scrape_pools"} 0.17501777799999996 +prometheus_http_request_duration_seconds_count{handler="/api/v1/scrape_pools"} 142 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.1"} 1217 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.2"} 1236 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.4"} 1244 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="1"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="3"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="8"} 1245 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="20"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="60"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="120"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/series"} 19.03228310300001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/series"} 1246 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.1"} 4412 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.2"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="0.4"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="1"} 4413 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="3"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="8"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="20"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="60"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="120"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/buildinfo"} 5.222974727000008 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.1"} 84669 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.2"} 84716 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.4"} 84721 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="1"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="3"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="8"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="20"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="60"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="120"} 84723 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/config"} 228.93575751199964 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/config"} 84723 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.2"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="0.4"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="1"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="3"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="8"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="20"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="60"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="120"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/flags"} 0.093302815 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/flags"} 32 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.1"} 862 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.2"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="0.4"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="1"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="3"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="8"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="20"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="60"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="120"} 863 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/runtimeinfo"} 8.009312817000001 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.2"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="0.4"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="1"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="3"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="8"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="20"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="60"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="120"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/tsdb"} 1.3378732679999994 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/tsdb"} 94 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="8"} 49 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="20"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/walreplay"} 0.034643267 +prometheus_http_request_duration_seconds_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.2"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="0.4"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="1"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="3"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="8"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="20"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="60"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="120"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets"} 0.5432347889999999 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets"} 191 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.2"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="0.4"} 18 
+prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="1"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="3"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="8"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="20"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="60"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="120"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_request_duration_seconds_sum{handler="/api/v1/targets/metadata"} 0.043789973 +prometheus_http_request_duration_seconds_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.1"} 385 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.2"} 481 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="0.4"} 528 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="1"} 612 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="3"} 634 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="8"} 640 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="20"} 641 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="60"} 642 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="120"} 643 +prometheus_http_request_duration_seconds_bucket{handler="/assets/*filepath",le="+Inf"} 643 +prometheus_http_request_duration_seconds_sum{handler="/assets/*filepath"} 280.0660388799997 +prometheus_http_request_duration_seconds_count{handler="/assets/*filepath"} 643 
+prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.2"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="0.4"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="1"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="3"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="8"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="20"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="60"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="120"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_request_duration_seconds_sum{handler="/classic/static/*filepath"} 0.006113046000000001 +prometheus_http_request_duration_seconds_count{handler="/classic/static/*filepath"} 103 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.2"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.4"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="1"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="3"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="8"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="20"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="60"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="120"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/config",le="+Inf"} 13 
+prometheus_http_request_duration_seconds_sum{handler="/config"} 0.0024490289999999997 +prometheus_http_request_duration_seconds_count{handler="/config"} 13 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.1"} 33 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.2"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="0.4"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="1"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="3"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="8"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="20"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="60"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="120"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_request_duration_seconds_sum{handler="/consoles/*filepath"} 0.5689515199999999 +prometheus_http_request_duration_seconds_count{handler="/consoles/*filepath"} 34 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="60"} 4 
+prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/debug/*subpath"} 0.086499352 +prometheus_http_request_duration_seconds_count{handler="/debug/*subpath"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.2"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="0.4"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="1"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="3"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="8"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="20"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="60"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="120"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_request_duration_seconds_sum{handler="/favicon.ico"} 0.05591882500000002 +prometheus_http_request_duration_seconds_count{handler="/favicon.ico"} 177 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.2"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="0.4"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="1"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="3"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="8"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="20"} 770 
+prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="60"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="120"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_request_duration_seconds_sum{handler="/favicon.svg"} 0.3058455699999999 +prometheus_http_request_duration_seconds_count{handler="/favicon.svg"} 770 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.2"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="0.4"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="1"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="3"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="8"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="20"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="60"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="120"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_request_duration_seconds_sum{handler="/federate"} 0.05996980699999999 +prometheus_http_request_duration_seconds_count{handler="/federate"} 4 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.2"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.4"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="1"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="3"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="8"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="20"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="60"} 9 
+prometheus_http_request_duration_seconds_bucket{handler="/flags",le="120"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_request_duration_seconds_sum{handler="/flags"} 0.001586781 +prometheus_http_request_duration_seconds_count{handler="/flags"} 9 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.032583203000000005 +prometheus_http_request_duration_seconds_count{handler="/graph"} 751 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 3.988654e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 4.052461e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 4.057771e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 4.058809e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 4.059071e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 4.059092e+06 
+prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_request_duration_seconds_sum{handler="/metrics"} 107584.12561354278 +prometheus_http_request_duration_seconds_count{handler="/metrics"} 4.059092e+06 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.2"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="0.4"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="1"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="3"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="8"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="20"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="60"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="120"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_request_duration_seconds_sum{handler="/query"} 0.5220201759999993 +prometheus_http_request_duration_seconds_count{handler="/query"} 1774 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.1"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.2"} 8672 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.4"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="1"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="3"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="8"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="20"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="60"} 8673 
+prometheus_http_request_duration_seconds_bucket{handler="/rules",le="120"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_request_duration_seconds_sum{handler="/rules"} 2.776021043000005 +prometheus_http_request_duration_seconds_count{handler="/rules"} 8673 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.2"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.4"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="1"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="3"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="8"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="20"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="60"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="120"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_request_duration_seconds_sum{handler="/service-discovery"} 0.004057062 +prometheus_http_request_duration_seconds_count{handler="/service-discovery"} 20 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.2"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.4"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="1"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="3"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="8"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="20"} 46 
+prometheus_http_request_duration_seconds_bucket{handler="/status",le="60"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="120"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_request_duration_seconds_sum{handler="/status"} 0.010107473 +prometheus_http_request_duration_seconds_count{handler="/status"} 46 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.2"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.4"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="1"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="3"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="8"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="20"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="60"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="120"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_request_duration_seconds_sum{handler="/targets"} 0.009001180000000001 +prometheus_http_request_duration_seconds_count{handler="/targets"} 39 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.2"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="0.4"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="1"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="3"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="8"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="20"} 49 
+prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="60"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="120"} 49 +prometheus_http_request_duration_seconds_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_request_duration_seconds_sum{handler="/tsdb-status"} 0.018204165 +prometheus_http_request_duration_seconds_count{handler="/tsdb-status"} 49 +# HELP prometheus_http_requests_total Counter of HTTP requests. +# TYPE prometheus_http_requests_total counter +prometheus_http_requests_total{code="200",handler="/"} 0 +prometheus_http_requests_total{code="200",handler="/-/healthy"} 29524 +prometheus_http_requests_total{code="200",handler="/-/quit"} 0 +prometheus_http_requests_total{code="200",handler="/-/ready"} 49 +prometheus_http_requests_total{code="200",handler="/-/reload"} 0 +prometheus_http_requests_total{code="200",handler="/alertmanager-discovery"} 0 +prometheus_http_requests_total{code="200",handler="/alerts"} 48 +prometheus_http_requests_total{code="200",handler="/api/v1/*path"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/clean_tombstones"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/delete_series"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/admin/tsdb/snapshot"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/alertmanagers"} 8 +prometheus_http_requests_total{code="200",handler="/api/v1/alerts"} 14635 +prometheus_http_requests_total{code="200",handler="/api/v1/format_query"} 3 +prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 14679 +prometheus_http_requests_total{code="200",handler="/api/v1/labels"} 4915 +prometheus_http_requests_total{code="200",handler="/api/v1/metadata"} 2093 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications"} 12 +prometheus_http_requests_total{code="200",handler="/api/v1/notifications/live"} 4529 
+prometheus_http_requests_total{code="200",handler="/api/v1/otlp/v1/metrics"} 0 +prometheus_http_requests_total{code="200",handler="/api/v1/parse_query"} 247 +prometheus_http_requests_total{code="200",handler="/api/v1/query"} 326411 +prometheus_http_requests_total{code="200",handler="/api/v1/query_exemplars"} 841 +prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 1.183734e+06 +prometheus_http_requests_total{code="200",handler="/api/v1/read"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/rules"} 88658 +prometheus_http_requests_total{code="200",handler="/api/v1/scrape_pools"} 142 +prometheus_http_requests_total{code="200",handler="/api/v1/series"} 1235 +prometheus_http_requests_total{code="200",handler="/api/v1/status/buildinfo"} 4413 +prometheus_http_requests_total{code="200",handler="/api/v1/status/config"} 84722 +prometheus_http_requests_total{code="200",handler="/api/v1/status/flags"} 32 +prometheus_http_requests_total{code="200",handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_requests_total{code="200",handler="/api/v1/status/tsdb"} 94 +prometheus_http_requests_total{code="200",handler="/api/v1/status/walreplay"} 49 +prometheus_http_requests_total{code="200",handler="/api/v1/targets"} 191 +prometheus_http_requests_total{code="200",handler="/api/v1/targets/metadata"} 16 +prometheus_http_requests_total{code="200",handler="/api/v1/write"} 0 +prometheus_http_requests_total{code="200",handler="/assets/*filepath"} 2213 +prometheus_http_requests_total{code="200",handler="/classic/static/*filepath"} 0 +prometheus_http_requests_total{code="200",handler="/config"} 13 +prometheus_http_requests_total{code="200",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="200",handler="/debug/*subpath"} 3 +prometheus_http_requests_total{code="200",handler="/favicon.ico"} 0 +prometheus_http_requests_total{code="200",handler="/favicon.svg"} 769 +prometheus_http_requests_total{code="200",handler="/federate"} 4 
+prometheus_http_requests_total{code="200",handler="/flags"} 9 +prometheus_http_requests_total{code="200",handler="/graph"} 0 +prometheus_http_requests_total{code="200",handler="/manifest.json"} 0 +prometheus_http_requests_total{code="200",handler="/metrics"} 4.059092e+06 +prometheus_http_requests_total{code="200",handler="/query"} 1774 +prometheus_http_requests_total{code="200",handler="/rules"} 8673 +prometheus_http_requests_total{code="200",handler="/service-discovery"} 20 +prometheus_http_requests_total{code="200",handler="/status"} 46 +prometheus_http_requests_total{code="200",handler="/targets"} 39 +prometheus_http_requests_total{code="200",handler="/tsdb-status"} 49 +prometheus_http_requests_total{code="200",handler="/version"} 0 +prometheus_http_requests_total{code="204",handler="/api/v1/*path"} 27 +prometheus_http_requests_total{code="204",handler="/api/v1/notifications/live"} 5 +prometheus_http_requests_total{code="301",handler="/debug/*subpath"} 1 +prometheus_http_requests_total{code="302",handler="/"} 688 +prometheus_http_requests_total{code="302",handler="/graph"} 751 +prometheus_http_requests_total{code="400",handler="/api/v1/format_query"} 1 +prometheus_http_requests_total{code="400",handler="/api/v1/label/:name/values"} 3292 +prometheus_http_requests_total{code="400",handler="/api/v1/labels"} 6 +prometheus_http_requests_total{code="400",handler="/api/v1/parse_query"} 18 +prometheus_http_requests_total{code="400",handler="/api/v1/query"} 155 +prometheus_http_requests_total{code="400",handler="/api/v1/query_range"} 263 +prometheus_http_requests_total{code="400",handler="/api/v1/rules"} 4 +prometheus_http_requests_total{code="400",handler="/api/v1/series"} 11 +prometheus_http_requests_total{code="400",handler="/api/v1/targets/metadata"} 2 +prometheus_http_requests_total{code="404",handler="/assets/*filepath"} 12 +prometheus_http_requests_total{code="404",handler="/classic/static/*filepath"} 103 
+prometheus_http_requests_total{code="404",handler="/consoles/*filepath"} 17 +prometheus_http_requests_total{code="404",handler="/favicon.ico"} 177 +prometheus_http_requests_total{code="416",handler="/favicon.svg"} 1 +prometheus_http_requests_total{code="422",handler="/api/v1/query"} 114 +prometheus_http_requests_total{code="422",handler="/api/v1/query_range"} 31 +prometheus_http_requests_total{code="499",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="499",handler="/api/v1/query_range"} 20 +prometheus_http_requests_total{code="503",handler="/api/v1/query"} 7 +prometheus_http_requests_total{code="503",handler="/api/v1/query_range"} 4 +prometheus_http_requests_total{code="503",handler="/api/v1/status/config"} 1 +# HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests. +# TYPE prometheus_http_response_size_bytes histogram +prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 688 +prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 688 +prometheus_http_response_size_bytes_sum{handler="/"} 19952 +prometheus_http_response_size_bytes_count{handler="/"} 688 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 29524 
+prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 29524 +prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 885720 +prometheus_http_response_size_bytes_count{handler="/-/healthy"} 29524 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/-/ready"} 1372 +prometheus_http_response_size_bytes_count{handler="/-/ready"} 49 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="10000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100000"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+06"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+07"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+08"} 48 +prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+09"} 48 
+prometheus_http_response_size_bytes_bucket{handler="/alerts",le="+Inf"} 48 +prometheus_http_response_size_bytes_sum{handler="/alerts"} 84096 +prometheus_http_response_size_bytes_count{handler="/alerts"} 48 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="10000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="100000"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+06"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+07"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+08"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="1e+09"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/*path",le="+Inf"} 27 +prometheus_http_response_size_bytes_sum{handler="/api/v1/*path"} 0 +prometheus_http_response_size_bytes_count{handler="/api/v1/*path"} 27 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="10000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="100000"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+06"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+07"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+08"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="1e+09"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alertmanagers",le="+Inf"} 8 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alertmanagers"} 1064 
+prometheus_http_response_size_bytes_count{handler="/api/v1/alertmanagers"} 8 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="10000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="100000"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+06"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+07"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+08"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="1e+09"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/alerts",le="+Inf"} 14635 +prometheus_http_response_size_bytes_sum{handler="/api/v1/alerts"} 1.0260926e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/alerts"} 14635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="10000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/format_query",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/api/v1/format_query"} 495 +prometheus_http_response_size_bytes_count{handler="/api/v1/format_query"} 4 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 8167 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 15939 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 17901 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 17971 +prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 2.0009454e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 17971 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100"} 102 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1000"} 4908 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="10000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100000"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+06"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+07"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+08"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+09"} 4921 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="+Inf"} 4921 +prometheus_http_response_size_bytes_sum{handler="/api/v1/labels"} 3.16046e+06 +prometheus_http_response_size_bytes_count{handler="/api/v1/labels"} 4921 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100"} 170 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="10000"} 1368 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="100000"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+06"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+07"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+08"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="1e+09"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/metadata",le="+Inf"} 2093 +prometheus_http_response_size_bytes_sum{handler="/api/v1/metadata"} 1.5950362e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/metadata"} 2093 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="10000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="100000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+06"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+07"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+08"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="1e+09"} 12 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications",le="+Inf"} 12 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications"} 360 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications"} 12 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100"} 4529 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="10000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="100000"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+06"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+07"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+08"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="1e+09"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/notifications/live",le="+Inf"} 4534 +prometheus_http_response_size_bytes_sum{handler="/api/v1/notifications/live"} 1365 +prometheus_http_response_size_bytes_count{handler="/api/v1/notifications/live"} 4534 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="10000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="100000"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+06"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+07"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+08"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="1e+09"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/parse_query",le="+Inf"} 265 +prometheus_http_response_size_bytes_sum{handler="/api/v1/parse_query"} 67620 
+prometheus_http_response_size_bytes_count{handler="/api/v1/parse_query"} 265 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 25147 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 325005 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 325143 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 326635 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 326692 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 326694 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 2.34481756e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 326694 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="10000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="100000"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+06"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+07"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+08"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="1e+09"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_exemplars",le="+Inf"} 841 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_exemplars"} 50460 
+prometheus_http_response_size_bytes_count{handler="/api/v1/query_exemplars"} 841 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 69552 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 917165 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 1.152103e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 1.183391e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 1.183621e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 1.184052e+06 +prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 1.869092376e+09 +prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 1.184052e+06 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="10000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="100000"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+06"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+07"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+08"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="1e+09"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/read",le="+Inf"} 16 +prometheus_http_response_size_bytes_sum{handler="/api/v1/read"} 50800 
+prometheus_http_response_size_bytes_count{handler="/api/v1/read"} 16 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="10000"} 88656 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100000"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+06"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+07"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+08"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+09"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="+Inf"} 88662 +prometheus_http_response_size_bytes_sum{handler="/api/v1/rules"} 7.01925022e+08 +prometheus_http_response_size_bytes_count{handler="/api/v1/rules"} 88662 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="10000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+06"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+07"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+08"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="1e+09"} 142 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/scrape_pools",le="+Inf"} 142 +prometheus_http_response_size_bytes_sum{handler="/api/v1/scrape_pools"} 18318 +prometheus_http_response_size_bytes_count{handler="/api/v1/scrape_pools"} 142 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100"} 1011 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1000"} 1233 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="10000"} 1244 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100000"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+06"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+07"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+08"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+09"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="+Inf"} 1246 +prometheus_http_response_size_bytes_sum{handler="/api/v1/series"} 235829 +prometheus_http_response_size_bytes_count{handler="/api/v1/series"} 1246 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="10000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="100000"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+06"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+07"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+08"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="1e+09"} 4413 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/buildinfo",le="+Inf"} 4413 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/buildinfo"} 807579 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/buildinfo"} 4413 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="10000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100000"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+06"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+07"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+08"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+09"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="+Inf"} 84723 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/config"} 6.3710963e+07 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/config"} 84723 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="10000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="100000"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+06"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+07"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+08"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="1e+09"} 32 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/flags",le="+Inf"} 32 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/flags"} 31968 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/flags"} 32 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="10000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="100000"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+06"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+07"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+08"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="1e+09"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/runtimeinfo",le="+Inf"} 863 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/runtimeinfo"} 240400 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/runtimeinfo"} 863 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1000"} 92 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="10000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="100000"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+06"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+07"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+08"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="1e+09"} 94 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/tsdb",le="+Inf"} 94 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/tsdb"} 66441 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/tsdb"} 94 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="1e+09"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/walreplay",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/api/v1/status/walreplay"} 3381 +prometheus_http_response_size_bytes_count{handler="/api/v1/status/walreplay"} 49 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1000"} 184 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="10000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="100000"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+06"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+07"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+08"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="1e+09"} 191 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets",le="+Inf"} 191 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets"} 191150 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets"} 191 
+prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="10000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="100000"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+06"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+07"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+08"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="1e+09"} 18 +prometheus_http_response_size_bytes_bucket{handler="/api/v1/targets/metadata",le="+Inf"} 18 +prometheus_http_response_size_bytes_sum{handler="/api/v1/targets/metadata"} 2736 +prometheus_http_response_size_bytes_count{handler="/api/v1/targets/metadata"} 18 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1000"} 12 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="10000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="100000"} 142 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+06"} 958 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+07"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+08"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="1e+09"} 2225 +prometheus_http_response_size_bytes_bucket{handler="/assets/*filepath",le="+Inf"} 2225 +prometheus_http_response_size_bytes_sum{handler="/assets/*filepath"} 3.525958804e+09 +prometheus_http_response_size_bytes_count{handler="/assets/*filepath"} 2225 
+prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="10000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="100000"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+06"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+07"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+08"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="1e+09"} 103 +prometheus_http_response_size_bytes_bucket{handler="/classic/static/*filepath",le="+Inf"} 103 +prometheus_http_response_size_bytes_sum{handler="/classic/static/*filepath"} 1957 +prometheus_http_response_size_bytes_count{handler="/classic/static/*filepath"} 103 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/config",le="10000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="100000"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+06"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+07"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+08"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+09"} 13 +prometheus_http_response_size_bytes_bucket{handler="/config",le="+Inf"} 13 +prometheus_http_response_size_bytes_sum{handler="/config"} 22776 +prometheus_http_response_size_bytes_count{handler="/config"} 13 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100"} 17 
+prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1000"} 17 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="10000"} 31 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="100000"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+06"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+07"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+08"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="1e+09"} 34 +prometheus_http_response_size_bytes_bucket{handler="/consoles/*filepath",le="+Inf"} 34 +prometheus_http_response_size_bytes_sum{handler="/consoles/*filepath"} 175556 +prometheus_http_response_size_bytes_count{handler="/consoles/*filepath"} 34 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="10000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+06"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/debug/*subpath"} 7062 +prometheus_http_response_size_bytes_count{handler="/debug/*subpath"} 4 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1000"} 177 
+prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="10000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="100000"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+06"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+07"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+08"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="1e+09"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.ico",le="+Inf"} 177 +prometheus_http_response_size_bytes_sum{handler="/favicon.ico"} 3363 +prometheus_http_response_size_bytes_count{handler="/favicon.ico"} 177 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1000"} 1 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="10000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="100000"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+06"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+07"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+08"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="1e+09"} 770 +prometheus_http_response_size_bytes_bucket{handler="/favicon.svg",le="+Inf"} 770 +prometheus_http_response_size_bytes_sum{handler="/favicon.svg"} 2.135541e+06 +prometheus_http_response_size_bytes_count{handler="/favicon.svg"} 770 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="10000"} 2 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="100000"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+06"} 4 
+prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+07"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+08"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="1e+09"} 4 +prometheus_http_response_size_bytes_bucket{handler="/federate",le="+Inf"} 4 +prometheus_http_response_size_bytes_sum{handler="/federate"} 28439 +prometheus_http_response_size_bytes_count{handler="/federate"} 4 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="10000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="100000"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+06"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+07"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+08"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+09"} 9 +prometheus_http_response_size_bytes_bucket{handler="/flags",le="+Inf"} 9 +prometheus_http_response_size_bytes_sum{handler="/flags"} 15768 +prometheus_http_response_size_bytes_count{handler="/flags"} 9 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 130 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 744 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 751 +prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 751 +prometheus_http_response_size_bytes_sum{handler="/graph"} 
164775 +prometheus_http_response_size_bytes_count{handler="/graph"} 751 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 4.059088e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 4.059092e+06 +prometheus_http_response_size_bytes_sum{handler="/metrics"} 8.2515439707e+10 +prometheus_http_response_size_bytes_count{handler="/metrics"} 4.059092e+06 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/query",le="10000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="100000"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+06"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+07"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+08"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="1e+09"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/query",le="+Inf"} 1774 +prometheus_http_response_size_bytes_sum{handler="/query"} 3.108048e+06 +prometheus_http_response_size_bytes_count{handler="/query"} 1774 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1000"} 0 
+prometheus_http_response_size_bytes_bucket{handler="/rules",le="10000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="100000"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+06"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+07"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+08"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+09"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/rules",le="+Inf"} 8673 +prometheus_http_response_size_bytes_sum{handler="/rules"} 1.5195096e+07 +prometheus_http_response_size_bytes_count{handler="/rules"} 8673 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="10000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100000"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+06"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+07"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+08"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+09"} 20 +prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="+Inf"} 20 +prometheus_http_response_size_bytes_sum{handler="/service-discovery"} 35040 +prometheus_http_response_size_bytes_count{handler="/service-discovery"} 20 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/status",le="10000"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="100000"} 46 
+prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+06"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+07"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+08"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+09"} 46 +prometheus_http_response_size_bytes_bucket{handler="/status",le="+Inf"} 46 +prometheus_http_response_size_bytes_sum{handler="/status"} 80592 +prometheus_http_response_size_bytes_count{handler="/status"} 46 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="10000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="100000"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+06"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+07"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+08"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+09"} 39 +prometheus_http_response_size_bytes_bucket{handler="/targets",le="+Inf"} 39 +prometheus_http_response_size_bytes_sum{handler="/targets"} 68328 +prometheus_http_response_size_bytes_count{handler="/targets"} 39 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1000"} 0 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="10000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="100000"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+06"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+07"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+08"} 49 +prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="1e+09"} 49 
+prometheus_http_response_size_bytes_bucket{handler="/tsdb-status",le="+Inf"} 49 +prometheus_http_response_size_bytes_sum{handler="/tsdb-status"} 85848 +prometheus_http_response_size_bytes_count{handler="/tsdb-status"} 49 +# HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active. +# TYPE prometheus_notifications_alertmanagers_discovered gauge +prometheus_notifications_alertmanagers_discovered 1 +# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager. +# TYPE prometheus_notifications_dropped_total counter +prometheus_notifications_dropped_total 0 +# HELP prometheus_notifications_errors_total Total number of sent alerts affected by errors. +# TYPE prometheus_notifications_errors_total counter +prometheus_notifications_errors_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 0 +# HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications. +# TYPE prometheus_notifications_latency_seconds summary +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.5"} 0.001566044 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.9"} 0.003927931 +prometheus_notifications_latency_seconds{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts",quantile="0.99"} 0.013928135 +prometheus_notifications_latency_seconds_sum{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 194.15032606200046 +prometheus_notifications_latency_seconds_count{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 75180 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. 
+# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 10000 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_notifications_sent_total Total number of alerts sent. +# TYPE prometheus_notifications_sent_total counter +prometheus_notifications_sent_total{alertmanager="http://demo.do.prometheus.io:9093/api/v2/alerts"} 141616 +# HELP prometheus_ready Whether Prometheus startup was fully completed and the server is ready for normal operation. +# TYPE prometheus_ready gauge +prometheus_ready 1 +# HELP prometheus_remote_storage_exemplars_in_total Exemplars in to remote storage, compare to exemplars out for queue managers. +# TYPE prometheus_remote_storage_exemplars_in_total counter +prometheus_remote_storage_exemplars_in_total 4.959738e+06 +# HELP prometheus_remote_storage_highest_timestamp_in_seconds Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet. +# TYPE prometheus_remote_storage_highest_timestamp_in_seconds gauge +prometheus_remote_storage_highest_timestamp_in_seconds 1.738949375e+09 +# HELP prometheus_remote_storage_histograms_in_total HistogramSamples in to remote storage, compare to histograms out for queue managers. +# TYPE prometheus_remote_storage_histograms_in_total counter +prometheus_remote_storage_histograms_in_total 0 +# HELP prometheus_remote_storage_samples_in_total Samples in to remote storage, compare to samples out for queue managers. +# TYPE prometheus_remote_storage_samples_in_total counter +prometheus_remote_storage_samples_in_total 6.00447296e+08 +# HELP prometheus_remote_storage_string_interner_zero_reference_releases_total The number of times release has been called for strings that are not interned. 
+# TYPE prometheus_remote_storage_string_interner_zero_reference_releases_total counter +prometheus_remote_storage_string_interner_zero_reference_releases_total 0 +# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. +# TYPE prometheus_rule_evaluation_duration_seconds summary +prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.000214623 +prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.001456135 +prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 0.008111814 +prometheus_rule_evaluation_duration_seconds_sum 5209.704794862625 +prometheus_rule_evaluation_duration_seconds_count 7.203456e+06 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_evaluation_failures_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +# HELP prometheus_rule_evaluations_total The total number of rule evaluations. 
+# TYPE prometheus_rule_evaluations_total counter +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 118092 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 118090 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.4761e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.476125e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 649495 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 649484 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.358035e+06 +prometheus_rule_evaluations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.358035e+06 +# HELP prometheus_rule_group_duration_seconds The duration of rule group evaluations. +# TYPE prometheus_rule_group_duration_seconds summary +prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000735928 +prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000818857 +prometheus_rule_group_duration_seconds{quantile="0.5"} 0.004852081 +prometheus_rule_group_duration_seconds{quantile="0.9"} 0.022897759 +prometheus_rule_group_duration_seconds{quantile="0.99"} 0.069327797 +prometheus_rule_group_duration_seconds_sum 5335.451440133042 +prometheus_rule_group_duration_seconds_count 472359 +# HELP prometheus_rule_group_interval_seconds The interval of a rule group. 
+# TYPE prometheus_rule_group_interval_seconds gauge +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 15 +prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 15 +# HELP prometheus_rule_group_iterations_missed_total The total number of rule group evaluations missed due to slow rule group evaluation. 
+# TYPE prometheus_rule_group_iterations_missed_total counter +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_iterations_missed_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +# HELP prometheus_rule_group_iterations_total The total number of scheduled rule group evaluations, whether executed or missed. 
+# TYPE prometheus_rule_group_iterations_total counter +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 59046 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 59045 +prometheus_rule_group_iterations_total{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 59045 +# HELP prometheus_rule_group_last_duration_seconds The duration of the last rule group evaluation. 
+# TYPE prometheus_rule_group_last_duration_seconds gauge +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000754015 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001104624 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.022040842 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.048928087 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.002766703 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002218466 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.010245447 +prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009232393 +# HELP prometheus_rule_group_last_evaluation_samples The number of samples returned during the last rule group evaluation. 
+# TYPE prometheus_rule_group_last_evaluation_samples gauge +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 10 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0 +prometheus_rule_group_last_evaluation_samples{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0 +# HELP prometheus_rule_group_last_evaluation_timestamp_seconds The timestamp of the last rule group evaluation in seconds. 
+# TYPE prometheus_rule_group_last_evaluation_timestamp_seconds gauge +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 1.738949373753179e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 1.738949366900781e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 1.7389493632087085e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 1.7389493666743164e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 1.7389493675395968e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 1.7389493623381927e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 1.738949366856423e+09 +prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 1.738949369917499e+09 +# HELP prometheus_rule_group_last_restore_duration_seconds The duration of the last alert rules alerts restoration using the `ALERTS_FOR_STATE` series. 
+# TYPE prometheus_rule_group_last_restore_duration_seconds gauge +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.002032625 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 9.1853e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.000166088 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.000108127 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 6.408e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 2.621e-06 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 8.4979e-05 +prometheus_rule_group_last_restore_duration_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.000104444 +# HELP prometheus_rule_group_last_rule_duration_sum_seconds The sum of time in seconds it took to evaluate each rule in the group regardless of concurrency. This should be higher than the group duration if rules are evaluated concurrently. 
+# TYPE prometheus_rule_group_last_rule_duration_sum_seconds gauge +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 0.000685176 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 0.001035099 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 0.021769662 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 0.042017156 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 0.00260535 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 0.002069356 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 0.009905481 +prometheus_rule_group_last_rule_duration_sum_seconds{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 0.009056893 +# HELP prometheus_rule_group_rules The number of rules. 
+# TYPE prometheus_rule_group_rules gauge +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.rules;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/ansible_managed.yml;ansible managed alert rules"} 2 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.rules;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_alerts.yaml;node-exporter"} 25 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.rules;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/node_rules.yaml;node-exporter.rules"} 11 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.rules;prometheus"} 23 +prometheus_rule_group_rules{rule_group="/etc/prometheus/rules/prometheus_alerts.yaml;prometheus"} 23 +# HELP prometheus_sd_azure_cache_hit_total Number of cache hit during refresh. +# TYPE prometheus_sd_azure_cache_hit_total counter +prometheus_sd_azure_cache_hit_total 0 +# HELP prometheus_sd_azure_failures_total Number of Azure service discovery refresh failures. +# TYPE prometheus_sd_azure_failures_total counter +prometheus_sd_azure_failures_total 0 +# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. 
+# TYPE prometheus_sd_consul_rpc_duration_seconds summary +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. +# TYPE prometheus_sd_consul_rpc_failures_total counter +prometheus_sd_consul_rpc_failures_total 0 +# HELP prometheus_sd_discovered_targets Current number of discovered targets. 
+# TYPE prometheus_sd_discovered_targets gauge +prometheus_sd_discovered_targets{config="alertmanager",name="scrape"} 1 +prometheus_sd_discovered_targets{config="blackbox",name="scrape"} 1 +prometheus_sd_discovered_targets{config="caddy",name="scrape"} 1 +prometheus_sd_discovered_targets{config="cadvisor",name="scrape"} 1 +prometheus_sd_discovered_targets{config="config-0",name="notify"} 1 +prometheus_sd_discovered_targets{config="grafana",name="scrape"} 1 +prometheus_sd_discovered_targets{config="node",name="scrape"} 1 +prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1 +prometheus_sd_discovered_targets{config="random",name="scrape"} 4 +# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_sd_dns_lookup_failures_total counter +prometheus_sd_dns_lookup_failures_total 0 +# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_sd_dns_lookups_total counter +prometheus_sd_dns_lookups_total 0 +# HELP prometheus_sd_failed_configs Current number of service discovery configurations that failed to load. +# TYPE prometheus_sd_failed_configs gauge +prometheus_sd_failed_configs{name="notify"} 0 +prometheus_sd_failed_configs{name="scrape"} 0 +# HELP prometheus_sd_file_mtime_seconds Timestamp (mtime) of files read by FileSD. Timestamp is set at read time. +# TYPE prometheus_sd_file_mtime_seconds gauge +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/alertmanager.yml"} 1.701295557e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/cadvisor.yml"} 1.705433682e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/node.yml"} 1.701295553e+09 +prometheus_sd_file_mtime_seconds{filename="/etc/prometheus/file_sd/random.yml"} 1.579263729e+09 +# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. 
+# TYPE prometheus_sd_file_read_errors_total counter +prometheus_sd_file_read_errors_total 0 +# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. +# TYPE prometheus_sd_file_scan_duration_seconds summary +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} 9.5384e-05 +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} 0.000275679 +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} 0.000275679 +prometheus_sd_file_scan_duration_seconds_sum 4.751347856999997 +prometheus_sd_file_scan_duration_seconds_count 11812 +# HELP prometheus_sd_file_watcher_errors_total The number of File-SD errors caused by filesystem watch failures. +# TYPE prometheus_sd_file_watcher_errors_total counter +prometheus_sd_file_watcher_errors_total 0 +# HELP prometheus_sd_http_failures_total Number of HTTP service discovery refresh failures. +# TYPE prometheus_sd_http_failures_total counter +prometheus_sd_http_failures_total 0 +# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. 
+# TYPE prometheus_sd_kubernetes_events_total counter +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpointslice"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +# HELP prometheus_sd_kubernetes_failures_total The number of failed WATCH/LIST requests. +# TYPE prometheus_sd_kubernetes_failures_total counter +prometheus_sd_kubernetes_failures_total 0 +# HELP prometheus_sd_kuma_fetch_duration_seconds The duration of a Kuma MADS fetch call. 
+# TYPE prometheus_sd_kuma_fetch_duration_seconds summary +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.5"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.9"} NaN +prometheus_sd_kuma_fetch_duration_seconds{quantile="0.99"} NaN +prometheus_sd_kuma_fetch_duration_seconds_sum 0 +prometheus_sd_kuma_fetch_duration_seconds_count 0 +# HELP prometheus_sd_kuma_fetch_failures_total The number of Kuma MADS fetch call failures. +# TYPE prometheus_sd_kuma_fetch_failures_total counter +prometheus_sd_kuma_fetch_failures_total 0 +# HELP prometheus_sd_kuma_fetch_skipped_updates_total The number of Kuma MADS fetch calls that result in no updates to the targets. +# TYPE prometheus_sd_kuma_fetch_skipped_updates_total counter +prometheus_sd_kuma_fetch_skipped_updates_total 0 +# HELP prometheus_sd_linode_failures_total Number of Linode service discovery refresh failures. +# TYPE prometheus_sd_linode_failures_total counter +prometheus_sd_linode_failures_total 0 +# HELP prometheus_sd_nomad_failures_total Number of nomad service discovery refresh failures. +# TYPE prometheus_sd_nomad_failures_total counter +prometheus_sd_nomad_failures_total 0 +# HELP prometheus_sd_received_updates_total Total number of update events received from the SD providers. +# TYPE prometheus_sd_received_updates_total counter +prometheus_sd_received_updates_total{name="notify"} 2 +prometheus_sd_received_updates_total{name="scrape"} 11820 +# HELP prometheus_sd_updates_delayed_total Total number of update events that couldn't be sent immediately. +# TYPE prometheus_sd_updates_delayed_total counter +prometheus_sd_updates_delayed_total{name="notify"} 0 +prometheus_sd_updates_delayed_total{name="scrape"} 0 +# HELP prometheus_sd_updates_total Total number of update events sent to the SD consumers. 
+# TYPE prometheus_sd_updates_total counter +prometheus_sd_updates_total{name="notify"} 1 +prometheus_sd_updates_total{name="scrape"} 2953 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. +# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14.982591232 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14.997567414 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 14.999977915 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15.000793403 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15.017607167 +prometheus_target_interval_length_seconds_sum{interval="15s"} 9.742237453453667e+06 +prometheus_target_interval_length_seconds_count{interval="15s"} 649482 +# HELP prometheus_target_metadata_cache_bytes The number of bytes that are currently used for storing metric metadata in the cache +# TYPE prometheus_target_metadata_cache_bytes gauge +prometheus_target_metadata_cache_bytes{scrape_job="alertmanager"} 6817 +prometheus_target_metadata_cache_bytes{scrape_job="blackbox"} 661 +prometheus_target_metadata_cache_bytes{scrape_job="caddy"} 2365 +prometheus_target_metadata_cache_bytes{scrape_job="cadvisor"} 4547 +prometheus_target_metadata_cache_bytes{scrape_job="grafana"} 37608 +prometheus_target_metadata_cache_bytes{scrape_job="node"} 13900 +prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 20265 +prometheus_target_metadata_cache_bytes{scrape_job="random"} 792 +# HELP prometheus_target_metadata_cache_entries Total number of metric metadata entries in the cache +# TYPE prometheus_target_metadata_cache_entries gauge +prometheus_target_metadata_cache_entries{scrape_job="alertmanager"} 86 +prometheus_target_metadata_cache_entries{scrape_job="blackbox"} 13 +prometheus_target_metadata_cache_entries{scrape_job="caddy"} 47 
+prometheus_target_metadata_cache_entries{scrape_job="cadvisor"} 93 +prometheus_target_metadata_cache_entries{scrape_job="grafana"} 373 +prometheus_target_metadata_cache_entries{scrape_job="node"} 293 +prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 237 +prometheus_target_metadata_cache_entries{scrape_job="random"} 16 +# HELP prometheus_target_scrape_pool_exceeded_label_limits_total Total number of times scrape pools hit the label limits, during sync or config reload. +# TYPE prometheus_target_scrape_pool_exceeded_label_limits_total counter +prometheus_target_scrape_pool_exceeded_label_limits_total 0 +# HELP prometheus_target_scrape_pool_exceeded_target_limit_total Total number of times scrape pools hit the target limit, during sync or config reload. +# TYPE prometheus_target_scrape_pool_exceeded_target_limit_total counter +prometheus_target_scrape_pool_exceeded_target_limit_total 0 +# HELP prometheus_target_scrape_pool_reloads_failed_total Total number of failed scrape pool reloads. +# TYPE prometheus_target_scrape_pool_reloads_failed_total counter +prometheus_target_scrape_pool_reloads_failed_total 0 +# HELP prometheus_target_scrape_pool_reloads_total Total number of scrape pool reloads. +# TYPE prometheus_target_scrape_pool_reloads_total counter +prometheus_target_scrape_pool_reloads_total 0 +# HELP prometheus_target_scrape_pool_symboltable_items Current number of symbols in table for this scrape pool. 
+# TYPE prometheus_target_scrape_pool_symboltable_items gauge +prometheus_target_scrape_pool_symboltable_items{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="node"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_symboltable_items{scrape_job="random"} 0 +# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. +# TYPE prometheus_target_scrape_pool_sync_total counter +prometheus_target_scrape_pool_sync_total{scrape_job="alertmanager"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="blackbox"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="caddy"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="cadvisor"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="grafana"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="node"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 2953 +prometheus_target_scrape_pool_sync_total{scrape_job="random"} 2953 +# HELP prometheus_target_scrape_pool_target_limit Maximum number of targets allowed in this scrape pool. 
+# TYPE prometheus_target_scrape_pool_target_limit gauge +prometheus_target_scrape_pool_target_limit{scrape_job="alertmanager"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="blackbox"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="caddy"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="cadvisor"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="grafana"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="node"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="prometheus"} 0 +prometheus_target_scrape_pool_target_limit{scrape_job="random"} 0 +# HELP prometheus_target_scrape_pool_targets Current number of targets in this scrape pool. +# TYPE prometheus_target_scrape_pool_targets gauge +prometheus_target_scrape_pool_targets{scrape_job="alertmanager"} 1 +prometheus_target_scrape_pool_targets{scrape_job="blackbox"} 1 +prometheus_target_scrape_pool_targets{scrape_job="caddy"} 1 +prometheus_target_scrape_pool_targets{scrape_job="cadvisor"} 1 +prometheus_target_scrape_pool_targets{scrape_job="grafana"} 1 +prometheus_target_scrape_pool_targets{scrape_job="node"} 1 +prometheus_target_scrape_pool_targets{scrape_job="prometheus"} 1 +prometheus_target_scrape_pool_targets{scrape_job="random"} 4 +# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed. +# TYPE prometheus_target_scrape_pools_failed_total counter +prometheus_target_scrape_pools_failed_total 0 +# HELP prometheus_target_scrape_pools_total Total number of scrape pool creation attempts. +# TYPE prometheus_target_scrape_pools_total counter +prometheus_target_scrape_pools_total 8 +# HELP prometheus_target_scrapes_cache_flush_forced_total How many times a scrape cache was flushed due to getting big while scrapes are failing. 
+# TYPE prometheus_target_scrapes_cache_flush_forced_total counter +prometheus_target_scrapes_cache_flush_forced_total 0 +# HELP prometheus_target_scrapes_exceeded_body_size_limit_total Total number of scrapes that hit the body size limit +# TYPE prometheus_target_scrapes_exceeded_body_size_limit_total counter +prometheus_target_scrapes_exceeded_body_size_limit_total 0 +# HELP prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total Total number of scrapes that hit the native histogram bucket limit and were rejected. +# TYPE prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total counter +prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total 0 +# HELP prometheus_target_scrapes_exceeded_sample_limit_total Total number of scrapes that hit the sample limit and were rejected. +# TYPE prometheus_target_scrapes_exceeded_sample_limit_total counter +prometheus_target_scrapes_exceeded_sample_limit_total 0 +# HELP prometheus_target_scrapes_exemplar_out_of_order_total Total number of exemplar rejected due to not being out of the expected order. +# TYPE prometheus_target_scrapes_exemplar_out_of_order_total counter +prometheus_target_scrapes_exemplar_out_of_order_total 0 +# HELP prometheus_target_scrapes_sample_duplicate_timestamp_total Total number of samples rejected due to duplicate timestamps but different values. +# TYPE prometheus_target_scrapes_sample_duplicate_timestamp_total counter +prometheus_target_scrapes_sample_duplicate_timestamp_total 0 +# HELP prometheus_target_scrapes_sample_out_of_bounds_total Total number of samples rejected due to timestamp falling outside of the time bounds. +# TYPE prometheus_target_scrapes_sample_out_of_bounds_total counter +prometheus_target_scrapes_sample_out_of_bounds_total 0 +# HELP prometheus_target_scrapes_sample_out_of_order_total Total number of samples rejected due to not being out of the expected order. 
+# TYPE prometheus_target_scrapes_sample_out_of_order_total counter +prometheus_target_scrapes_sample_out_of_order_total 455 +# HELP prometheus_target_sync_failed_total Total number of target sync failures. +# TYPE prometheus_target_sync_failed_total counter +prometheus_target_sync_failed_total{scrape_job="alertmanager"} 0 +prometheus_target_sync_failed_total{scrape_job="blackbox"} 0 +prometheus_target_sync_failed_total{scrape_job="caddy"} 0 +prometheus_target_sync_failed_total{scrape_job="cadvisor"} 0 +prometheus_target_sync_failed_total{scrape_job="grafana"} 0 +prometheus_target_sync_failed_total{scrape_job="node"} 0 +prometheus_target_sync_failed_total{scrape_job="prometheus"} 0 +prometheus_target_sync_failed_total{scrape_job="random"} 0 +# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. +# TYPE prometheus_target_sync_length_seconds summary +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.01"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.05"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.5"} 2.0522e-05 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.9"} 0.000141485 +prometheus_target_sync_length_seconds{scrape_job="alertmanager",quantile="0.99"} 0.000141485 +prometheus_target_sync_length_seconds_sum{scrape_job="alertmanager"} 0.13103036 +prometheus_target_sync_length_seconds_count{scrape_job="alertmanager"} 2953 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.01"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.05"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.5"} 3.9252e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.9"} 6.2134e-05 +prometheus_target_sync_length_seconds{scrape_job="blackbox",quantile="0.99"} 6.2134e-05 
+prometheus_target_sync_length_seconds_sum{scrape_job="blackbox"} 0.6044201539999996 +prometheus_target_sync_length_seconds_count{scrape_job="blackbox"} 2953 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.01"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.05"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.5"} 1.3759e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.9"} 7.8256e-05 +prometheus_target_sync_length_seconds{scrape_job="caddy",quantile="0.99"} 7.8256e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="caddy"} 0.10369844599999971 +prometheus_target_sync_length_seconds_count{scrape_job="caddy"} 2953 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.01"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.05"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.5"} 2.0452e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.9"} 4.2337e-05 +prometheus_target_sync_length_seconds{scrape_job="cadvisor",quantile="0.99"} 4.2337e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="cadvisor"} 0.10489659999999998 +prometheus_target_sync_length_seconds_count{scrape_job="cadvisor"} 2953 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.01"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.05"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.5"} 1.4995e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.9"} 1.7284e-05 +prometheus_target_sync_length_seconds{scrape_job="grafana",quantile="0.99"} 1.7284e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="grafana"} 0.09031192700000017 +prometheus_target_sync_length_seconds_count{scrape_job="grafana"} 2953 
+prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.01"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.05"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.5"} 4.1607e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.9"} 7.416e-05 +prometheus_target_sync_length_seconds{scrape_job="node",quantile="0.99"} 7.416e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="node"} 0.11539821299999993 +prometheus_target_sync_length_seconds_count{scrape_job="node"} 2953 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 1.5564e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 1.961e-05 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 1.961e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.10655758600000016 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 2953 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.01"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.05"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.5"} 4.1299e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.9"} 4.8586e-05 +prometheus_target_sync_length_seconds{scrape_job="random",quantile="0.99"} 4.8586e-05 +prometheus_target_sync_length_seconds_sum{scrape_job="random"} 0.20406449899999993 +prometheus_target_sync_length_seconds_count{scrape_job="random"} 2953 +# HELP prometheus_template_text_expansion_failures_total The total number of template text expansion failures. 
+# TYPE prometheus_template_text_expansion_failures_total counter +prometheus_template_text_expansion_failures_total 0 +# HELP prometheus_template_text_expansions_total The total number of template text expansions. +# TYPE prometheus_template_text_expansions_total counter +prometheus_template_text_expansions_total 2.126277e+06 +# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. +# TYPE prometheus_treecache_watcher_goroutines gauge +prometheus_treecache_watcher_goroutines 0 +# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. +# TYPE prometheus_treecache_zookeeper_failures_total counter +prometheus_treecache_zookeeper_failures_total 0 +# HELP prometheus_tsdb_blocks_loaded Number of currently loaded data blocks +# TYPE prometheus_tsdb_blocks_loaded gauge +prometheus_tsdb_blocks_loaded 17 +# HELP prometheus_tsdb_checkpoint_creations_failed_total Total number of checkpoint creations that failed. +# TYPE prometheus_tsdb_checkpoint_creations_failed_total counter +prometheus_tsdb_checkpoint_creations_failed_total 0 +# HELP prometheus_tsdb_checkpoint_creations_total Total number of checkpoint creations attempted. +# TYPE prometheus_tsdb_checkpoint_creations_total counter +prometheus_tsdb_checkpoint_creations_total 62 +# HELP prometheus_tsdb_checkpoint_deletions_failed_total Total number of checkpoint deletions that failed. +# TYPE prometheus_tsdb_checkpoint_deletions_failed_total counter +prometheus_tsdb_checkpoint_deletions_failed_total 0 +# HELP prometheus_tsdb_checkpoint_deletions_total Total number of checkpoint deletions attempted. +# TYPE prometheus_tsdb_checkpoint_deletions_total counter +prometheus_tsdb_checkpoint_deletions_total 62 +# HELP prometheus_tsdb_clean_start -1: lockfile is disabled. 0: a lockfile from a previous execution was replaced. 
1: lockfile creation was clean +# TYPE prometheus_tsdb_clean_start gauge +prometheus_tsdb_clean_start 1 +# HELP prometheus_tsdb_compaction_chunk_range_seconds Final time range of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_range_seconds histogram +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 86 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 676 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 1555 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 2379 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 34068 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 4.819758e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_range_seconds_sum 8.917524773366e+12 +prometheus_tsdb_compaction_chunk_range_seconds_count 4.861956e+06 +# HELP prometheus_tsdb_compaction_chunk_samples Final number of samples on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_samples histogram +prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 1407 +prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 1544 +prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 1881 +prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 2065 +prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 2782 +prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 4342 +prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 6180 +prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 11518 +prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 13890 
+prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 4.810155e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 4.86058e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_samples_sum 5.86000708e+08 +prometheus_tsdb_compaction_chunk_samples_count 4.861956e+06 +# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction +# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 1233 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 156238 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2.006456e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3.568405e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 3.835144e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 4.034591e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 4.505646e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 4.69694e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 4.78551e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 4.861956e+06 +prometheus_tsdb_compaction_chunk_size_bytes_sum 6.81852972e+08 +prometheus_tsdb_compaction_chunk_size_bytes_count 4.861956e+06 +# HELP prometheus_tsdb_compaction_duration_seconds Duration of compaction runs +# TYPE prometheus_tsdb_compaction_duration_seconds histogram +prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 61 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 155 
+prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 180 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="1024"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="2048"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="4096"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="8192"} 183 +prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 183 +prometheus_tsdb_compaction_duration_seconds_sum 254.9095696899999 +prometheus_tsdb_compaction_duration_seconds_count 183 +# HELP prometheus_tsdb_compaction_populating_block Set to 1 when a block is currently being written to the disk. +# TYPE prometheus_tsdb_compaction_populating_block gauge +prometheus_tsdb_compaction_populating_block 0 +# HELP prometheus_tsdb_compactions_failed_total Total number of compactions that failed for the partition. +# TYPE prometheus_tsdb_compactions_failed_total counter +prometheus_tsdb_compactions_failed_total 0 +# HELP prometheus_tsdb_compactions_skipped_total Total number of skipped compactions due to disabled auto compaction. +# TYPE prometheus_tsdb_compactions_skipped_total counter +prometheus_tsdb_compactions_skipped_total 0 +# HELP prometheus_tsdb_compactions_total Total number of compactions that were executed for the partition. +# TYPE prometheus_tsdb_compactions_total counter +prometheus_tsdb_compactions_total 183 +# HELP prometheus_tsdb_compactions_triggered_total Total number of triggered compactions for the partition. 
+# TYPE prometheus_tsdb_compactions_triggered_total counter +prometheus_tsdb_compactions_triggered_total 14855 +# HELP prometheus_tsdb_data_replay_duration_seconds Time taken to replay the data on disk. +# TYPE prometheus_tsdb_data_replay_duration_seconds gauge +prometheus_tsdb_data_replay_duration_seconds 5.550163617 +# HELP prometheus_tsdb_exemplar_exemplars_appended_total Total number of appended exemplars. +# TYPE prometheus_tsdb_exemplar_exemplars_appended_total counter +prometheus_tsdb_exemplar_exemplars_appended_total 0 +# HELP prometheus_tsdb_exemplar_exemplars_in_storage Number of exemplars currently in circular storage. +# TYPE prometheus_tsdb_exemplar_exemplars_in_storage gauge +prometheus_tsdb_exemplar_exemplars_in_storage 0 +# HELP prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds The timestamp of the oldest exemplar stored in circular storage. Useful to check for what timerange the current exemplar buffer limit allows. This usually means the last timestampfor all exemplars for a typical setup. This is not true though if one of the series timestamp is in future compared to rest series. +# TYPE prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds gauge +prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds 0 +# HELP prometheus_tsdb_exemplar_max_exemplars Total number of exemplars the exemplar storage can store, resizeable. +# TYPE prometheus_tsdb_exemplar_max_exemplars gauge +prometheus_tsdb_exemplar_max_exemplars 0 +# HELP prometheus_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts. +# TYPE prometheus_tsdb_exemplar_out_of_order_exemplars_total counter +prometheus_tsdb_exemplar_out_of_order_exemplars_total 0 +# HELP prometheus_tsdb_exemplar_series_with_exemplars_in_storage Number of series with exemplars currently in circular storage. 
+# TYPE prometheus_tsdb_exemplar_series_with_exemplars_in_storage gauge +prometheus_tsdb_exemplar_series_with_exemplars_in_storage 0 +# HELP prometheus_tsdb_head_active_appenders Number of currently active appender transactions +# TYPE prometheus_tsdb_head_active_appenders gauge +prometheus_tsdb_head_active_appenders 0 +# HELP prometheus_tsdb_head_chunks Total number of chunks in the head block. +# TYPE prometheus_tsdb_head_chunks gauge +prometheus_tsdb_head_chunks 31476 +# HELP prometheus_tsdb_head_chunks_created_total Total number of chunks created in the head +# TYPE prometheus_tsdb_head_chunks_created_total counter +prometheus_tsdb_head_chunks_created_total 4.893432e+06 +# HELP prometheus_tsdb_head_chunks_removed_total Total number of chunks removed in the head +# TYPE prometheus_tsdb_head_chunks_removed_total counter +prometheus_tsdb_head_chunks_removed_total 4.861956e+06 +# HELP prometheus_tsdb_head_chunks_storage_size_bytes Size of the chunks_head directory. +# TYPE prometheus_tsdb_head_chunks_storage_size_bytes gauge +prometheus_tsdb_head_chunks_storage_size_bytes 7.237299e+06 +# HELP prometheus_tsdb_head_gc_duration_seconds Runtime of garbage collection in the head block. +# TYPE prometheus_tsdb_head_gc_duration_seconds summary +prometheus_tsdb_head_gc_duration_seconds_sum 4.773801686000001 +prometheus_tsdb_head_gc_duration_seconds_count 123 +# HELP prometheus_tsdb_head_max_time Maximum timestamp of the head block. The unit is decided by the library consumer. +# TYPE prometheus_tsdb_head_max_time gauge +prometheus_tsdb_head_max_time 1.738949375191e+12 +# HELP prometheus_tsdb_head_max_time_seconds Maximum timestamp of the head block. +# TYPE prometheus_tsdb_head_max_time_seconds gauge +prometheus_tsdb_head_max_time_seconds 1.738949375e+09 +# HELP prometheus_tsdb_head_min_time Minimum time bound of the head block. The unit is decided by the library consumer. 
+# TYPE prometheus_tsdb_head_min_time gauge +prometheus_tsdb_head_min_time 1.738944000171e+12 +# HELP prometheus_tsdb_head_min_time_seconds Minimum time bound of the head block. +# TYPE prometheus_tsdb_head_min_time_seconds gauge +prometheus_tsdb_head_min_time_seconds 1.738944e+09 +# HELP prometheus_tsdb_head_out_of_order_samples_appended_total Total number of appended out of order samples. +# TYPE prometheus_tsdb_head_out_of_order_samples_appended_total counter +prometheus_tsdb_head_out_of_order_samples_appended_total{type="float"} 0 +prometheus_tsdb_head_out_of_order_samples_appended_total{type="histogram"} 0 +# HELP prometheus_tsdb_head_samples_appended_total Total number of appended samples. +# TYPE prometheus_tsdb_head_samples_appended_total counter +prometheus_tsdb_head_samples_appended_total{type="float"} 5.85543187e+08 +prometheus_tsdb_head_samples_appended_total{type="histogram"} 0 +# HELP prometheus_tsdb_head_series Total number of series in the head block. +# TYPE prometheus_tsdb_head_series gauge +prometheus_tsdb_head_series 10720 +# HELP prometheus_tsdb_head_series_created_total Total number of series created in the head +# TYPE prometheus_tsdb_head_series_created_total counter +prometheus_tsdb_head_series_created_total 18541 +# HELP prometheus_tsdb_head_series_not_found_total Total number of requests for series that were not found. +# TYPE prometheus_tsdb_head_series_not_found_total counter +prometheus_tsdb_head_series_not_found_total 0 +# HELP prometheus_tsdb_head_series_removed_total Total number of series removed in the head +# TYPE prometheus_tsdb_head_series_removed_total counter +prometheus_tsdb_head_series_removed_total 7821 +# HELP prometheus_tsdb_head_truncations_failed_total Total number of head truncations that failed. +# TYPE prometheus_tsdb_head_truncations_failed_total counter +prometheus_tsdb_head_truncations_failed_total 0 +# HELP prometheus_tsdb_head_truncations_total Total number of head truncations attempted. 
+# TYPE prometheus_tsdb_head_truncations_total counter +prometheus_tsdb_head_truncations_total 123 +# HELP prometheus_tsdb_isolation_high_watermark The highest TSDB append ID that has been given out. +# TYPE prometheus_tsdb_isolation_high_watermark gauge +prometheus_tsdb_isolation_high_watermark 7.852949e+06 +# HELP prometheus_tsdb_isolation_low_watermark The lowest TSDB append ID that is still referenced. +# TYPE prometheus_tsdb_isolation_low_watermark gauge +prometheus_tsdb_isolation_low_watermark 7.852949e+06 +# HELP prometheus_tsdb_lowest_timestamp Lowest timestamp value stored in the database. The unit is decided by the library consumer. +# TYPE prometheus_tsdb_lowest_timestamp gauge +prometheus_tsdb_lowest_timestamp 1.73618640004e+12 +# HELP prometheus_tsdb_lowest_timestamp_seconds Lowest timestamp value stored in the database. +# TYPE prometheus_tsdb_lowest_timestamp_seconds gauge +prometheus_tsdb_lowest_timestamp_seconds 1.7361864e+09 +# HELP prometheus_tsdb_mmap_chunk_corruptions_total Total number of memory-mapped chunk corruptions. +# TYPE prometheus_tsdb_mmap_chunk_corruptions_total counter +prometheus_tsdb_mmap_chunk_corruptions_total 0 +# HELP prometheus_tsdb_mmap_chunks_total Total number of chunks that were memory-mapped. +# TYPE prometheus_tsdb_mmap_chunks_total counter +prometheus_tsdb_mmap_chunks_total 4.851264e+06 +# HELP prometheus_tsdb_out_of_bound_samples_total Total number of out of bound samples ingestion failed attempts with out of order support disabled. +# TYPE prometheus_tsdb_out_of_bound_samples_total counter +prometheus_tsdb_out_of_bound_samples_total{type="float"} 0 +# HELP prometheus_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts due to out of order being disabled. 
+# TYPE prometheus_tsdb_out_of_order_samples_total counter +prometheus_tsdb_out_of_order_samples_total{type="float"} 517 +prometheus_tsdb_out_of_order_samples_total{type="histogram"} 0 +# HELP prometheus_tsdb_reloads_failures_total Number of times the database failed to reloadBlocks block data from disk. +# TYPE prometheus_tsdb_reloads_failures_total counter +prometheus_tsdb_reloads_failures_total 0 +# HELP prometheus_tsdb_reloads_total Number of times the database reloaded block data from disk. +# TYPE prometheus_tsdb_reloads_total counter +prometheus_tsdb_reloads_total 14822 +# HELP prometheus_tsdb_retention_limit_bytes Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled +# TYPE prometheus_tsdb_retention_limit_bytes gauge +prometheus_tsdb_retention_limit_bytes 0 +# HELP prometheus_tsdb_retention_limit_seconds How long to retain samples in storage. +# TYPE prometheus_tsdb_retention_limit_seconds gauge +prometheus_tsdb_retention_limit_seconds 2.6784e+06 +# HELP prometheus_tsdb_size_retentions_total The number of times that blocks were deleted because the maximum number of bytes was exceeded. +# TYPE prometheus_tsdb_size_retentions_total counter +prometheus_tsdb_size_retentions_total 0 +# HELP prometheus_tsdb_snapshot_replay_error_total Total number snapshot replays that failed. +# TYPE prometheus_tsdb_snapshot_replay_error_total counter +prometheus_tsdb_snapshot_replay_error_total 0 +# HELP prometheus_tsdb_storage_blocks_bytes The number of bytes that are currently used for local storage by all blocks. +# TYPE prometheus_tsdb_storage_blocks_bytes gauge +prometheus_tsdb_storage_blocks_bytes 2.762863592e+09 +# HELP prometheus_tsdb_symbol_table_size_bytes Size of symbol table in memory for loaded blocks +# TYPE prometheus_tsdb_symbol_table_size_bytes gauge +prometheus_tsdb_symbol_table_size_bytes 10616 +# HELP prometheus_tsdb_time_retentions_total The number of times that blocks were deleted because the maximum time limit was exceeded. 
+# TYPE prometheus_tsdb_time_retentions_total counter +prometheus_tsdb_time_retentions_total 5 +# HELP prometheus_tsdb_tombstone_cleanup_seconds The time taken to recompact blocks to remove tombstones. +# TYPE prometheus_tsdb_tombstone_cleanup_seconds histogram +prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0 +prometheus_tsdb_tombstone_cleanup_seconds_sum 0 +prometheus_tsdb_tombstone_cleanup_seconds_count 0 +# HELP prometheus_tsdb_too_old_samples_total Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window. +# TYPE prometheus_tsdb_too_old_samples_total counter +prometheus_tsdb_too_old_samples_total{type="float"} 0 +# HELP prometheus_tsdb_vertical_compactions_total Total number of compactions done on overlapping blocks. +# TYPE prometheus_tsdb_vertical_compactions_total counter +prometheus_tsdb_vertical_compactions_total 0 +# HELP prometheus_tsdb_wal_completed_pages_total Total number of completed pages. +# TYPE prometheus_tsdb_wal_completed_pages_total counter +prometheus_tsdb_wal_completed_pages_total 109271 +# HELP prometheus_tsdb_wal_corruptions_total Total number of WAL corruptions. +# TYPE prometheus_tsdb_wal_corruptions_total counter +prometheus_tsdb_wal_corruptions_total 0 +# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of write log fsync. +# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN +prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN +prometheus_tsdb_wal_fsync_duration_seconds_sum 4.842524568000002 +prometheus_tsdb_wal_fsync_duration_seconds_count 123 +# HELP prometheus_tsdb_wal_page_flushes_total Total number of page flushes. 
+# TYPE prometheus_tsdb_wal_page_flushes_total counter +prometheus_tsdb_wal_page_flushes_total 2.293951e+06 +# HELP prometheus_tsdb_wal_segment_current Write log segment index that TSDB is currently writing to. +# TYPE prometheus_tsdb_wal_segment_current gauge +prometheus_tsdb_wal_segment_current 24726 +# HELP prometheus_tsdb_wal_storage_size_bytes Size of the write log directory. +# TYPE prometheus_tsdb_wal_storage_size_bytes gauge +prometheus_tsdb_wal_storage_size_bytes 6.9168385e+07 +# HELP prometheus_tsdb_wal_truncate_duration_seconds Duration of WAL truncation. +# TYPE prometheus_tsdb_wal_truncate_duration_seconds summary +prometheus_tsdb_wal_truncate_duration_seconds_sum 121.61954577099996 +prometheus_tsdb_wal_truncate_duration_seconds_count 62 +# HELP prometheus_tsdb_wal_truncations_failed_total Total number of write log truncations that failed. +# TYPE prometheus_tsdb_wal_truncations_failed_total counter +prometheus_tsdb_wal_truncations_failed_total 0 +# HELP prometheus_tsdb_wal_truncations_total Total number of write log truncations attempted. +# TYPE prometheus_tsdb_wal_truncations_total counter +prometheus_tsdb_wal_truncations_total 62 +# HELP prometheus_tsdb_wal_writes_failed_total Total number of write log writes that failed. +# TYPE prometheus_tsdb_wal_writes_failed_total counter +prometheus_tsdb_wal_writes_failed_total 0 +# HELP prometheus_web_federation_errors_total Total number of errors that occurred while sending federation responses. +# TYPE prometheus_web_federation_errors_total counter +prometheus_web_federation_errors_total 0 +# HELP prometheus_web_federation_warnings_total Total number of warnings that occurred while sending federation responses. +# TYPE prometheus_web_federation_warnings_total counter +prometheus_web_federation_warnings_total 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. 
+# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 4.059092e+06 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 +# EOF diff --git a/model/textparse/testdata/omtestdata.txt b/model/textparse/testdata/alltypes.5mfs.om.txt similarity index 100% rename from model/textparse/testdata/omtestdata.txt rename to model/textparse/testdata/alltypes.5mfs.om.txt diff --git a/model/textparse/testdata/promtestdata.nometa.txt b/model/textparse/testdata/promtestdata.nometa.txt deleted file mode 100644 index 235f0aa464..0000000000 --- a/model/textparse/testdata/promtestdata.nometa.txt +++ /dev/null @@ -1,411 +0,0 @@ -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -go_goroutines 33 -go_memstats_alloc_bytes 1.7518624e+07 -go_memstats_alloc_bytes_total 8.3062296e+08 -go_memstats_buck_hash_sys_bytes 1.494637e+06 -go_memstats_frees_total 4.65658e+06 -go_memstats_gc_sys_bytes 1.107968e+06 -go_memstats_heap_alloc_bytes 1.7518624e+07 -go_memstats_heap_idle_bytes 6.668288e+06 -go_memstats_heap_inuse_bytes 1.8956288e+07 -go_memstats_heap_objects 72755 -go_memstats_heap_released_bytes_total 0 -go_memstats_heap_sys_bytes 2.5624576e+07 -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -go_memstats_lookups_total 2089 -go_memstats_mallocs_total 4.729335e+06 -go_memstats_mcache_inuse_bytes 9600 -go_memstats_mcache_sys_bytes 16384 -go_memstats_mspan_inuse_bytes 211520 -go_memstats_mspan_sys_bytes 245760 
-go_memstats_next_gc_bytes 2.033527e+07 -go_memstats_other_sys_bytes 2.077323e+06 -go_memstats_stack_inuse_bytes 1.6384e+06 -go_memstats_stack_sys_bytes 1.6384e+06 -go_memstats_sys_bytes 3.2205048e+07 -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 -http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 
-http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 -http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 -http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 -http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 
-http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 -http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 -http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 
-http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN -http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 -http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 -http_request_duration_microseconds_count{handler="version"} 0 -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 -http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN 
-http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 -http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 
-http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 -http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 -http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 -http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN 
-http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 -http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN 
-http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 -http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN -http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 
-http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 -http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 -http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN 
-http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 -prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -prometheus_config_last_reload_successful 1 -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -prometheus_evaluator_iterations_skipped_total 0 -prometheus_notifications_dropped_total 0 -prometheus_notifications_queue_capacity 10000 -prometheus_notifications_queue_length 0 -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN 
-prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -prometheus_sd_azure_refresh_failures_total 0 -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_failures_total 0 -prometheus_sd_dns_lookup_failures_total 0 -prometheus_sd_dns_lookups_total 0 -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -prometheus_sd_ec2_refresh_failures_total 0 -prometheus_sd_file_read_errors_total 0 -prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN -prometheus_sd_file_scan_duration_seconds_sum 0 -prometheus_sd_file_scan_duration_seconds_count 0 -prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN 
-prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN -prometheus_sd_gce_refresh_duration_sum 0 -prometheus_sd_gce_refresh_duration_count 0 -prometheus_sd_gce_refresh_failures_total 0 -prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_marathon_refresh_duration_seconds_sum 0 -prometheus_sd_marathon_refresh_duration_seconds_count 0 -prometheus_sd_marathon_refresh_failures_total 0 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 -prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 -prometheus_target_interval_length_seconds_count{interval="50ms"} 
685 -prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 -prometheus_target_skipped_scrapes_total 0 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 -prometheus_treecache_watcher_goroutines 0 -prometheus_treecache_zookeeper_failures_total 0 -# EOF diff --git a/model/textparse/testdata/promtestdata.txt b/model/textparse/testdata/promtestdata.txt deleted file mode 100644 index 174f383e91..0000000000 --- a/model/textparse/testdata/promtestdata.txt +++ /dev/null @@ -1,529 +0,0 @@ -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 33 -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. -# TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 1.7518624e+07 -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. 
-# TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 8.3062296e+08 -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. -# TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.494637e+06 -# HELP go_memstats_frees_total Total number of frees. -# TYPE go_memstats_frees_total counter -go_memstats_frees_total 4.65658e+06 -# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. -# TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 1.107968e+06 -# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. -# TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 1.7518624e+07 -# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. -# TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 6.668288e+06 -# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. -# TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 1.8956288e+07 -# HELP go_memstats_heap_objects Number of allocated objects. -# TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 72755 -# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. -# TYPE go_memstats_heap_released_bytes_total counter -go_memstats_heap_released_bytes_total 0 -# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. -# TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 2.5624576e+07 -# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. -# TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -# HELP go_memstats_lookups_total Total number of pointer lookups. -# TYPE go_memstats_lookups_total counter -go_memstats_lookups_total 2089 -# HELP go_memstats_mallocs_total Total number of mallocs. 
-# TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 4.729335e+06 -# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. -# TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 9600 -# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. -# TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 16384 -# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. -# TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 211520 -# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. -# TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 245760 -# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. -# TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 2.033527e+07 -# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. -# TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 2.077323e+06 -# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. -# TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 1.6384e+06 -# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. -# TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 1.6384e+06 -# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. -# TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 3.2205048e+07 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
-# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 -http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 -http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN 
-http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 -http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 -http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 -http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 
-http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 -http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 -http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN 
-http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 -http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 -http_request_duration_microseconds_count{handler="version"} 0 -# HELP http_request_size_bytes The HTTP request sizes in bytes. -# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 -http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN 
-http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 -http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 -http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 -http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 -http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 
-http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -# HELP http_requests_total Total number of HTTP requests made. -# TYPE http_requests_total counter -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -# HELP http_response_size_bytes The HTTP response sizes in bytes. 
-# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 
-http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN -http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 -http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 -http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 
-http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN -http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 -# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. 
-# TYPE prometheus_build_info gauge -prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. -# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. -# TYPE prometheus_config_last_reload_successful gauge -prometheus_config_last_reload_successful 1 -# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. -# TYPE prometheus_evaluator_duration_seconds summary -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. -# TYPE prometheus_evaluator_iterations_skipped_total counter -prometheus_evaluator_iterations_skipped_total 0 -# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. -# TYPE prometheus_notifications_dropped_total counter -prometheus_notifications_dropped_total 0 -# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. -# TYPE prometheus_notifications_queue_capacity gauge -prometheus_notifications_queue_capacity 10000 -# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. 
-# TYPE prometheus_notifications_queue_length gauge -prometheus_notifications_queue_length 0 -# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. -# TYPE prometheus_rule_evaluation_failures_total counter -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. -# TYPE prometheus_sd_azure_refresh_duration_seconds summary -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. -# TYPE prometheus_sd_azure_refresh_failures_total counter -prometheus_sd_azure_refresh_failures_total 0 -# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. 
-# TYPE prometheus_sd_consul_rpc_duration_seconds summary -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. -# TYPE prometheus_sd_consul_rpc_failures_total counter -prometheus_sd_consul_rpc_failures_total 0 -# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. -# TYPE prometheus_sd_dns_lookup_failures_total counter -prometheus_sd_dns_lookup_failures_total 0 -# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. -# TYPE prometheus_sd_dns_lookups_total counter -prometheus_sd_dns_lookups_total 0 -# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. 
-# TYPE prometheus_sd_ec2_refresh_duration_seconds summary -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. -# TYPE prometheus_sd_ec2_refresh_failures_total counter -prometheus_sd_ec2_refresh_failures_total 0 -# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. -# TYPE prometheus_sd_file_read_errors_total counter -prometheus_sd_file_read_errors_total 0 -# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. -# TYPE prometheus_sd_file_scan_duration_seconds summary -prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN -prometheus_sd_file_scan_duration_seconds_sum 0 -prometheus_sd_file_scan_duration_seconds_count 0 -# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. -# TYPE prometheus_sd_gce_refresh_duration summary -prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN -prometheus_sd_gce_refresh_duration_sum 0 -prometheus_sd_gce_refresh_duration_count 0 -# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. -# TYPE prometheus_sd_gce_refresh_failures_total counter -prometheus_sd_gce_refresh_failures_total 0 -# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. 
-# TYPE prometheus_sd_kubernetes_events_total counter -prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 -# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. -# TYPE prometheus_sd_marathon_refresh_duration_seconds summary -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_marathon_refresh_duration_seconds_sum 0 -prometheus_sd_marathon_refresh_duration_seconds_count 0 -# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. -# TYPE prometheus_sd_marathon_refresh_failures_total counter -prometheus_sd_marathon_refresh_failures_total 0 -# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. 
-# TYPE prometheus_target_interval_length_seconds summary -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 -prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 -prometheus_target_interval_length_seconds_count{interval="50ms"} 685 -# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. -# TYPE prometheus_target_scrape_pool_sync_total counter -prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 -# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. -# TYPE prometheus_target_skipped_scrapes_total counter -prometheus_target_skipped_scrapes_total 0 -# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. 
-# TYPE prometheus_target_sync_length_seconds summary -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 -# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. -# TYPE prometheus_treecache_watcher_goroutines gauge -prometheus_treecache_watcher_goroutines 0 -# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. -# TYPE prometheus_treecache_zookeeper_failures_total counter -prometheus_treecache_zookeeper_failures_total 0 -# EOF diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 02bd01e0d3..2185d028fb 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1391,7 +1391,7 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { func readTextParseTestMetrics(t testing.TB) []byte { t.Helper() - b, err := os.ReadFile("../model/textparse/testdata/promtestdata.txt") + b, err := os.ReadFile("../model/textparse/testdata/alltypes.237mfs.prom.txt") if err != nil { t.Fatal(err) } @@ -1459,15 +1459,24 @@ func TestPromTextToProto(t *testing.T) { } got = append(got, mf.GetName()) } - require.Len(t, got, 59) + require.Len(t, got, 237) // Check a few to see if those are not dups. 
- require.Equal(t, "go_gc_duration_seconds", got[0]) - require.Equal(t, "prometheus_evaluator_duration_seconds", got[32]) - require.Equal(t, "prometheus_treecache_zookeeper_failures_total", got[58]) + require.Equal(t, "go_gc_cycles_automatic_gc_cycles_total", got[0]) + require.Equal(t, "prometheus_sd_kuma_fetch_duration_seconds", got[128]) + require.Equal(t, "promhttp_metric_handler_requests_total", got[236]) } +// BenchmarkScrapeLoopAppend benchmarks a core append function in a scrapeLoop +// that creates a new parser and goes through a byte slice from a single scrape. +// Benchmark compares append function run across 2 dimensions: +// *`data`: different sizes of metrics scraped e.g. one big gauge metric family +// with a thousand series and more realistic scenario with common types. +// *`fmt`: different scrape formats which will benchmark different parsers e.g. +// promtext, omtext and promproto. +// +// Recommended CLI invocation: /* - export bench=append-v1 && go test \ + export bench=append-v1 && go test ./scrape/... \ -run '^$' -bench '^BenchmarkScrapeLoopAppend' \ -benchtime 5s -count 6 -cpu 2 -timeout 999m \ | tee ${bench}.txt @@ -1477,8 +1486,8 @@ func BenchmarkScrapeLoopAppend(b *testing.B) { name string parsableText []byte }{ - {name: "1Fam1000Gauges", parsableText: makeTestGauges(1000)}, // ~33.8 KB, ~38.8 KB in proto - {name: "59FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~33.3 KB, ~13.2 KB in proto. + {name: "1Fam1000Gauges", parsableText: makeTestGauges(2000)}, // ~68.1 KB, ~77.9 KB in proto. + {name: "237FamsAllTypes", parsableText: readTextParseTestMetrics(b)}, // ~185.7 KB, ~70.6 KB in proto. 
} { b.Run(fmt.Sprintf("data=%v", data.name), func(b *testing.B) { metricsProto := promTextToProto(b, data.parsableText) From b472ce7010de872747af2b57188c2d32c7e1007b Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Mon, 10 Feb 2025 21:20:48 +0100 Subject: [PATCH 1357/1397] chore: enable early-return from revive Signed-off-by: Matthieu MOREL --- .golangci.yml | 7 +++++++ discovery/azure/azure.go | 5 ++--- discovery/linode/linode.go | 9 ++++---- discovery/manager.go | 6 +++--- discovery/moby/docker.go | 21 +++++++++---------- .../remote_storage_adapter/influxdb/client.go | 6 +++--- promql/engine.go | 6 +++--- storage/remote/azuread/azuread.go | 5 ++--- tsdb/agent/db.go | 10 ++++----- tsdb/isolation.go | 7 +++---- 10 files changed, 41 insertions(+), 41 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f294a4d76e..56509fe13f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -126,6 +126,9 @@ linters-settings: - allowTypesBefore: "*testing.T,testing.TB" - name: context-keys-type - name: dot-imports + - name: early-return + arguments: + - "preserveScope" # A lot of false positives: incorrectly identifies channel draining as "empty code block". # See https://github.com/mgechev/revive/issues/386 - name: empty-block @@ -137,6 +140,8 @@ linters-settings: - name: exported - name: increment-decrement - name: indent-error-flow + arguments: + - "preserveScope" - name: package-comments # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them. 
disabled: true @@ -144,6 +149,8 @@ linters-settings: - name: receiver-naming - name: redefines-builtin-id - name: superfluous-else + arguments: + - "preserveScope" - name: time-naming - name: unexported-return - name: unreachable-code diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 862d86859b..8a6945790f 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -458,11 +458,10 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID) } if err != nil { - if errors.Is(err, errorNotFound) { - d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) - } else { + if !errors.Is(err, errorNotFound) { return nil, err } + d.logger.Warn("Network interface does not exist", "name", nicID, "err", err) // Get out of this routine because we cannot continue without a network interface. return nil, nil } diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 8e6fd8bf00..453901bc05 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -194,13 +194,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { events, err := d.client.ListEvents(ctx, &eventsOpts) if err != nil { var e *linodego.Error - if errors.As(err, &e) && e.Code == http.StatusUnauthorized { - // If we get a 401, the token doesn't have `events:read_only` scope. - // Disable event polling and fallback to doing a full refresh every interval. - d.eventPollingEnabled = false - } else { + if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) { return nil, err } + // If we get a 401, the token doesn't have `events:read_only` scope. + // Disable event polling and fallback to doing a full refresh every interval. + d.eventPollingEnabled = false } else { // Event polling tells us changes the Linode API is aware of. 
Actions issued outside of the Linode API, // such as issuing a `shutdown` at the VM's console instead of using the API to power off an instance, diff --git a/discovery/manager.go b/discovery/manager.go index 87e0ecc44b..3219117d2a 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -101,12 +101,12 @@ func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus. // Register the metrics. // We have to do this after setting all options, so that the name of the Manager is set. - if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { + metrics, err := NewManagerMetrics(registerer, mgr.name) + if err != nil { logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } + mgr.metrics = metrics return mgr } diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index de277a58db..fca97ccd47 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -237,17 +237,16 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er if len(networks) == 0 { // Try to lookup shared networks for { - if containerNetworkMode.IsContainer() { - tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()] - if !exists { - break - } - networks = tmpContainer.NetworkSettings.Networks - containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode) - if len(networks) > 0 { - break - } - } else { + if !containerNetworkMode.IsContainer() { + break + } + tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()] + if !exists { + break + } + networks = tmpContainer.NetworkSettings.Networks + containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode) + if len(networks) > 0 { break } } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go 
b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index 12dc1732a5..a91b27360e 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -288,11 +288,11 @@ func valuesToSamples(timestamp time.Time, value interface{}) (prompb.Sample, err var valueInt64 int64 var ok bool if valueFloat64, ok = value.(float64); !ok { - if valueInt64, ok = value.(int64); ok { - valueFloat64 = float64(valueInt64) - } else { + valueInt64, ok = value.(int64) + if !ok { return prompb.Sample{}, fmt.Errorf("unable to convert sample value to float64: %v", value) } + valueFloat64 = float64(valueInt64) } return prompb.Sample{ diff --git a/promql/engine.go b/promql/engine.go index 3e14a2529d..b4a4899bec 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -3553,11 +3553,11 @@ func formatDate(t time.Time) string { // unwrapParenExpr does the AST equivalent of removing parentheses around a expression. 
func unwrapParenExpr(e *parser.Expr) { for { - if p, ok := (*e).(*parser.ParenExpr); ok { - *e = p.Expr - } else { + p, ok := (*e).(*parser.ParenExpr) + if !ok { break } + *e = p.Expr } } diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index 20ec53d6f6..aa3dee6b45 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -349,11 +349,10 @@ func (tokenProvider *tokenProvider) getToken(ctx context.Context) error { func (tokenProvider *tokenProvider) updateRefreshTime(accessToken azcore.AccessToken) error { tokenExpiryTimestamp := accessToken.ExpiresOn.UTC() deltaExpirytime := time.Now().Add(time.Until(tokenExpiryTimestamp) / 2) - if deltaExpirytime.After(time.Now().UTC()) { - tokenProvider.refreshTime = deltaExpirytime - } else { + if !deltaExpirytime.After(time.Now().UTC()) { return errors.New("access token expiry is less than the current time") } + tokenProvider.refreshTime = deltaExpirytime return nil } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 0bcef8e7bc..1f876f4e1e 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -1026,12 +1026,11 @@ func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.L return 0, storage.ErrOutOfOrderCT } - if ct > series.lastTs { - series.lastTs = ct - } else { + if ct <= series.lastTs { // discard the sample if it's out of order. return 0, storage.ErrOutOfOrderCT } + series.lastTs = ct switch { case h != nil: @@ -1091,12 +1090,11 @@ func (a *appender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, return 0, storage.ErrOutOfOrderSample } - if ct > series.lastTs { - series.lastTs = ct - } else { + if ct <= series.lastTs { // discard the sample if it's out of order. return 0, storage.ErrOutOfOrderCT } + series.lastTs = ct // NOTE: always modify pendingSamples and sampleSeries together. 
a.pendingSamples = append(a.pendingSamples, record.RefSample{ diff --git a/tsdb/isolation.go b/tsdb/isolation.go index 86330f36e4..1035991e74 100644 --- a/tsdb/isolation.go +++ b/tsdb/isolation.go @@ -275,12 +275,11 @@ func (txr *txRing) cleanupAppendIDsBelow(bound uint64) { pos := int(txr.txIDFirst) for txr.txIDCount > 0 { - if txr.txIDs[pos] < bound { - txr.txIDFirst++ - txr.txIDCount-- - } else { + if txr.txIDs[pos] >= bound { break } + txr.txIDFirst++ + txr.txIDCount-- pos++ if pos == len(txr.txIDs) { From d6443244077a06e1e0f81cc5558c0abf2ce2bdce Mon Sep 17 00:00:00 2001 From: machine424 Date: Thu, 2 Jan 2025 17:59:04 +0100 Subject: [PATCH 1358/1397] feat(tsdb/(head|agent)): reuse pools across segments to avoid generating garbage during WL replay This is part of the "reduce WAL replay overhead/garbage" effort to help with https://github.com/prometheus/prometheus/issues/6934. Signed-off-by: machine424 --- tsdb/agent/db.go | 27 +++++++++++----------- tsdb/head.go | 10 ++++++++ tsdb/head_test.go | 35 ++++++++++++++++++++-------- tsdb/head_wal.go | 59 ++++++++++++++++++----------------------------- 4 files changed, 72 insertions(+), 59 deletions(-) diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 0bcef8e7bc..32494a4d62 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -235,6 +235,12 @@ type DB struct { appenderPool sync.Pool bufPool sync.Pool + // These pools are used during WAL replay. 
+ walReplaySeriesPool zeropool.Pool[[]record.RefSeries] + walReplaySamplesPool zeropool.Pool[[]record.RefSample] + walReplayHistogramsPool zeropool.Pool[[]record.RefHistogramSample] + walReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] + nextRef *atomic.Uint64 series *stripeSeries // deleted is a map of (ref IDs that should be deleted from WAL) to (the WAL segment they @@ -426,11 +432,6 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H decoded = make(chan interface{}, 10) errCh = make(chan error, 1) - - seriesPool zeropool.Pool[[]record.RefSeries] - samplesPool zeropool.Pool[[]record.RefSample] - histogramsPool zeropool.Pool[[]record.RefHistogramSample] - floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] ) go func() { @@ -440,7 +441,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H rec := r.Record() switch dec.Type(rec) { case record.Series: - series := seriesPool.Get()[:0] + series := db.walReplaySeriesPool.Get()[:0] series, err = dec.Series(rec, series) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -452,7 +453,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } decoded <- series case record.Samples: - samples := samplesPool.Get()[:0] + samples := db.walReplaySamplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -464,7 +465,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } decoded <- samples case record.HistogramSamples, record.CustomBucketsHistogramSamples: - histograms := histogramsPool.Get()[:0] + histograms := db.walReplayHistogramsPool.Get()[:0] histograms, err = dec.HistogramSamples(rec, histograms) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -476,7 +477,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } decoded <- histograms case record.FloatHistogramSamples, 
record.CustomBucketsFloatHistogramSamples: - floatHistograms := floatHistogramsPool.Get()[:0] + floatHistograms := db.walReplayFloatHistogramsPool.Get()[:0] floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms) if err != nil { errCh <- &wlog.CorruptionErr{ @@ -521,7 +522,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H } } } - seriesPool.Put(v) + db.walReplaySeriesPool.Put(v) case []record.RefSample: for _, entry := range v { // Update the lastTs for the series based @@ -535,7 +536,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series.lastTs = entry.T } } - samplesPool.Put(v) + db.walReplaySamplesPool.Put(v) case []record.RefHistogramSample: for _, entry := range v { // Update the lastTs for the series based @@ -549,7 +550,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series.lastTs = entry.T } } - histogramsPool.Put(v) + db.walReplayHistogramsPool.Put(v) case []record.RefFloatHistogramSample: for _, entry := range v { // Update the lastTs for the series based @@ -563,7 +564,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H series.lastTs = entry.T } } - floatHistogramsPool.Put(v) + db.walReplayFloatHistogramsPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } diff --git a/tsdb/head.go b/tsdb/head.go index 4fbb4b5710..6f9a287aa0 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -94,6 +94,16 @@ type Head struct { bytesPool zeropool.Pool[[]byte] memChunkPool sync.Pool + // These pools are used during WAL/WBL replay. 
+ wlReplaySeriesPool zeropool.Pool[[]record.RefSeries] + wlReplaySamplesPool zeropool.Pool[[]record.RefSample] + wlReplaytStonesPool zeropool.Pool[[]tombstones.Stone] + wlReplayExemplarsPool zeropool.Pool[[]record.RefExemplar] + wlReplayHistogramsPool zeropool.Pool[[]record.RefHistogramSample] + wlReplayFloatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] + wlReplayMetadataPool zeropool.Pool[[]record.RefMetadata] + wlReplayMmapMarkersPool zeropool.Pool[[]record.RefMmapMarker] + // All series addressable by their ID or hash. series *stripeSeries diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 33b54a756f..e498578c10 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -46,6 +46,7 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" @@ -440,27 +441,41 @@ func BenchmarkLoadWLs(b *testing.B) { // BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set. // BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located. +// +// Using an absolute path for BENCHMARK_LOAD_REAL_WLS_DIR is recommended. +// +// Because WLs loading may alter BENCHMARK_LOAD_REAL_WLS_DIR which can affect benchmark results and to ensure consistency, +// a copy of BENCHMARK_LOAD_REAL_WLS_DIR is made for each iteration and deleted at the end. +// Make sure there is sufficient disk space for that. 
func BenchmarkLoadRealWLs(b *testing.B) { - dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR") - if dir == "" { + srcDir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR") + if srcDir == "" { b.SkipNow() } - wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone) - require.NoError(b, err) - b.Cleanup(func() { wal.Close() }) - - wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone) - require.NoError(b, err) - b.Cleanup(func() { wbl.Close() }) - // Load the WAL. for i := 0; i < b.N; i++ { + b.StopTimer() + dir := b.TempDir() + require.NoError(b, fileutil.CopyDirs(srcDir, dir)) + + wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone) + require.NoError(b, err) + b.Cleanup(func() { wal.Close() }) + + wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone) + require.NoError(b, err) + b.Cleanup(func() { wbl.Close() }) + b.StartTimer() + opts := DefaultHeadOptions() opts.ChunkDirRoot = dir h, err := NewHead(nil, nil, wal, wbl, opts, nil) require.NoError(b, err) require.NoError(b, h.Init(0)) + + b.StopTimer() + require.NoError(b, os.RemoveAll(dir)) } } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 0afe84a875..ad03fa4766 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -39,7 +39,6 @@ import ( "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/wlog" - "github.com/prometheus/prometheus/util/zeropool" ) // histogramRecord combines both RefHistogramSample and RefFloatHistogramSample @@ -73,14 +72,6 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch decoded = make(chan interface{}, 10) decodeErr, seriesCreationErr error - - seriesPool zeropool.Pool[[]record.RefSeries] - samplesPool zeropool.Pool[[]record.RefSample] - tstonesPool zeropool.Pool[[]tombstones.Stone] - exemplarsPool zeropool.Pool[[]record.RefExemplar] - histogramsPool 
zeropool.Pool[[]record.RefHistogramSample] - floatHistogramsPool zeropool.Pool[[]record.RefFloatHistogramSample] - metadataPool zeropool.Pool[[]record.RefMetadata] ) defer func() { @@ -140,7 +131,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch rec := r.Record() switch dec.Type(rec) { case record.Series: - series := seriesPool.Get()[:0] + series := h.wlReplaySeriesPool.Get()[:0] series, err = dec.Series(rec, series) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -152,7 +143,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- series case record.Samples: - samples := samplesPool.Get()[:0] + samples := h.wlReplaySamplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -164,7 +155,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- samples case record.Tombstones: - tstones := tstonesPool.Get()[:0] + tstones := h.wlReplaytStonesPool.Get()[:0] tstones, err = dec.Tombstones(rec, tstones) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -176,7 +167,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- tstones case record.Exemplars: - exemplars := exemplarsPool.Get()[:0] + exemplars := h.wlReplayExemplarsPool.Get()[:0] exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -188,7 +179,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- exemplars case record.HistogramSamples, record.CustomBucketsHistogramSamples: - hists := histogramsPool.Get()[:0] + hists := h.wlReplayHistogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -200,7 +191,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- hists case record.FloatHistogramSamples, 
record.CustomBucketsFloatHistogramSamples: - hists := floatHistogramsPool.Get()[:0] + hists := h.wlReplayFloatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -212,7 +203,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decoded <- hists case record.Metadata: - meta := metadataPool.Get()[:0] + meta := h.wlReplayMetadataPool.Get()[:0] meta, err := dec.Metadata(rec, meta) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -251,7 +242,7 @@ Outer: idx := uint64(mSeries.ref) % uint64(concurrency) processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries} } - seriesPool.Put(v) + h.wlReplaySeriesPool.Put(v) case []record.RefSample: samples := v minValidTime := h.minValidTime.Load() @@ -287,7 +278,7 @@ Outer: } samples = samples[m:] } - samplesPool.Put(v) + h.wlReplaySamplesPool.Put(v) case []tombstones.Stone: for _, s := range v { for _, itv := range s.Intervals { @@ -301,12 +292,12 @@ Outer: h.tombstones.AddInterval(s.Ref, itv) } } - tstonesPool.Put(v) + h.wlReplaytStonesPool.Put(v) case []record.RefExemplar: for _, e := range v { exemplarsInput <- e } - exemplarsPool.Put(v) + h.wlReplayExemplarsPool.Put(v) case []record.RefHistogramSample: samples := v minValidTime := h.minValidTime.Load() @@ -342,7 +333,7 @@ Outer: } samples = samples[m:] } - histogramsPool.Put(v) + h.wlReplayHistogramsPool.Put(v) case []record.RefFloatHistogramSample: samples := v minValidTime := h.minValidTime.Load() @@ -378,7 +369,7 @@ Outer: } samples = samples[m:] } - floatHistogramsPool.Put(v) + h.wlReplayFloatHistogramsPool.Put(v) case []record.RefMetadata: for _, m := range v { s := h.series.getByID(m.Ref) @@ -392,7 +383,7 @@ Outer: Help: m.Help, } } - metadataPool.Put(v) + h.wlReplayMetadataPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } @@ -659,12 +650,8 @@ func (h *Head) loadWBL(r *wlog.Reader, syms 
*labels.SymbolTable, multiRef map[ch shards = make([][]record.RefSample, concurrency) histogramShards = make([][]histogramRecord, concurrency) - decodedCh = make(chan interface{}, 10) - decodeErr error - samplesPool zeropool.Pool[[]record.RefSample] - markersPool zeropool.Pool[[]record.RefMmapMarker] - histogramSamplesPool zeropool.Pool[[]record.RefHistogramSample] - floatHistogramSamplesPool zeropool.Pool[[]record.RefFloatHistogramSample] + decodedCh = make(chan interface{}, 10) + decodeErr error ) defer func() { @@ -700,7 +687,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch rec := r.Record() switch dec.Type(rec) { case record.Samples: - samples := samplesPool.Get()[:0] + samples := h.wlReplaySamplesPool.Get()[:0] samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -712,7 +699,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- samples case record.MmapMarkers: - markers := markersPool.Get()[:0] + markers := h.wlReplayMmapMarkersPool.Get()[:0] markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -724,7 +711,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- markers case record.HistogramSamples, record.CustomBucketsHistogramSamples: - hists := histogramSamplesPool.Get()[:0] + hists := h.wlReplayHistogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -736,7 +723,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } decodedCh <- hists case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: - hists := floatHistogramSamplesPool.Get()[:0] + hists := h.wlReplayFloatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ @@ -787,7 +774,7 @@ func (h *Head) loadWBL(r 
*wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - samplesPool.Put(v) + h.wlReplaySamplesPool.Put(v) case []record.RefMmapMarker: markers := v for _, rm := range markers { @@ -842,7 +829,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - histogramSamplesPool.Put(v) + h.wlReplayHistogramsPool.Put(v) case []record.RefFloatHistogramSample: samples := v // We split up the samples into chunks of 5000 samples or less. @@ -874,7 +861,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } samples = samples[m:] } - floatHistogramSamplesPool.Put(v) + h.wlReplayFloatHistogramsPool.Put(v) default: panic(fmt.Errorf("unexpected decodedCh type: %T", d)) } From acd74b02fbebc10ebb559e2bc716b5fabb46bbb8 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 11 Feb 2025 14:46:03 +1100 Subject: [PATCH 1359/1397] Fix typo Signed-off-by: Charles Korn --- docs/querying/operators.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/querying/operators.md b/docs/querying/operators.md index 02cea500a1..df65a4335f 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -334,7 +334,7 @@ the input samples, while `limit_ratio(-0.9, ...)` returns precisely the remaining approximately 90% of the input samples not returned by `limit_ratio(0.1, ...)`. -`group` and `count` do not do not interact with the sample values, +`group` and `count` do not interact with the sample values, they work in the same way for float samples and histogram samples. `count_values` outputs one time series per unique sample value. Each series has From 2cfd402b3a0c092a22fedd92cbf942dd0c51ea48 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Tue, 11 Feb 2025 14:55:39 +1100 Subject: [PATCH 1360/1397] Clarify sorting behaviour of `topk` and `bottomk`. 
Signed-off-by: Charles Korn --- docs/querying/operators.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/querying/operators.md b/docs/querying/operators.md index df65a4335f..7242049784 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -319,7 +319,14 @@ the input samples, including the original labels, are returned in the result vector. `by` and `without` are only used to bucket the input vector. Similar to `min` and `max`, they only operate on float samples, considering `NaN` values to be farthest from the top or bottom, respectively. Histogram samples in the -input vector are ignored, flagged by an info-level annotation. +input vector are ignored, flagged by an info-level annotation. + +If used in an instant query, `topk` and `bottomk` return series ordered by +value in descending or ascending order, respectively. If used with `by` or +`without`, then series within each bucket are sorted by value, and series in +the same bucket are returned consecutively, but there is no guarantee that +buckets of series will be returned in any particular order. No sorting applies +to range queries. `limitk` and `limit_ratio` also return a subset of the input samples, including the original labels in the result vector. 
The subset is selected in a From 329ec6831a7dd2893fb108ccbaccf6aa56c8ada3 Mon Sep 17 00:00:00 2001 From: jub0bs <52325150+jub0bs@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:14:55 +0100 Subject: [PATCH 1361/1397] util/httputil: reduce heap allocations in newCompressedResponseWriter (#16001) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * util/httputil: Benchmark newCompressedResponseWriter This benchmark illustrates that newCompressedResponseWriter incurs a prohibitive amount of heap allocations when handling a request containing a malicious Accept-Encoding header.¬ Signed-off-by: jub0bs * util/httputil: Improve newCompressedResponseWriter This change dramatically reduces the heap allocations (in bytes) incurred when handling a request containing a malicious Accept-Encoding header. Below are some benchmark results; for conciseness, I've omitted the name of the benchmark function (BenchmarkNewCompressionHandler_MaliciousAcceptEncoding): ``` goos: darwin goarch: amd64 pkg: github.com/prometheus/prometheus/util/httputil cpu: Intel(R) Core(TM) i7-6700HQ CPU @ 2.60GHz │ old │ new │ │ sec/op │ sec/op vs base │ 18.60m ± 2% 13.54m ± 3% -27.17% (p=0.000 n=10) │ old │ new │ │ B/op │ B/op vs base │ 16785442.50 ± 0% 32.00 ± 0% -100.00% (p=0.000 n=10) │ old │ new │ │ allocs/op │ allocs/op vs base │ 2.000 ± 0% 1.000 ± 0% -50.00% (p=0.000 n=10) ``` Signed-off-by: jub0bs --------- Signed-off-by: jub0bs --- util/httputil/compression.go | 12 ++++++++++-- util/httputil/compression_test.go | 12 ++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/util/httputil/compression.go b/util/httputil/compression.go index 9a8a666453..d5bedb7fa9 100644 --- a/util/httputil/compression.go +++ b/util/httputil/compression.go @@ -56,8 +56,13 @@ func (c *compressedResponseWriter) Close() { // Constructs a new compressedResponseWriter based on client request headers. 
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { - encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") - for _, encoding := range encodings { + raw := req.Header.Get(acceptEncodingHeader) + var ( + encoding string + commaFound bool + ) + for { + encoding, raw, commaFound = strings.Cut(raw, ",") switch strings.TrimSpace(encoding) { case gzipEncoding: writer.Header().Set(contentEncodingHeader, gzipEncoding) @@ -72,6 +77,9 @@ func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) writer: zlib.NewWriter(writer), } } + if !commaFound { + break + } } return &compressedResponseWriter{ ResponseWriter: writer, diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go index e166c7de79..fd3f1f66d4 100644 --- a/util/httputil/compression_test.go +++ b/util/httputil/compression_test.go @@ -18,6 +18,7 @@ import ( "io" "net/http" "net/http/httptest" + "strings" "testing" "github.com/klauspost/compress/gzip" @@ -72,6 +73,17 @@ func TestCompressionHandler_PlainText(t *testing.T) { require.Equal(t, expected, actual, "expected response with content") } +func BenchmarkNewCompressionHandler_MaliciousAcceptEncoding(b *testing.B) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/whatever", nil) + req.Header.Set("Accept-Encoding", strings.Repeat(",", http.DefaultMaxHeaderBytes)) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + newCompressedResponseWriter(rec, req) + } +} + func TestCompressionHandler_Gzip(t *testing.T) { tearDown := setup() defer tearDown() From 8baad1a73e471bd3cf3175a1608199e27484f179 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Tue, 11 Feb 2025 19:09:21 +0100 Subject: [PATCH 1362/1397] Bump promci to latest to avoid deprecated actions/cache CI issue. 
(#16013) Current main is broken due to: ``` Error: This request has been automatically failed because it uses a deprecated version of `actions/cache: 0c45773b623bea8c8e75f6c82b208c3cf94ea4f9`. Please update your workflow to use v3/v4 of actions/cache to avoid interruptions. Learn more: https://github.blog/changelog/2024-12-05-notice-of-upcoming-releases-and-breaking-changes-for-github-actions/#actions-cache-v1-v2-and-actions-toolkit-cache-package-closing-down ``` This is odd given `actions/cache: 0c45773b623bea8c8e75f6c82b208c3cf94ea4f9` has a comment of v4.0.2 (so not v1 and v2), trying 4.1.0. https://github.com/prometheus/prometheus/actions/runs/13267052807/job/37037025130?pr=16011 Signed-off-by: bwplotka --- .github/workflows/ci.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 57c47720c6..35a2651a0a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/setup_environment with: enable_npm: true @@ -30,7 +30,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.23-base steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... - run: GOARCH=386 go test ./... 
@@ -63,7 +63,7 @@ jobs: steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/setup_environment with: enable_go: false @@ -122,7 +122,7 @@ jobs: thread: [ 0, 1, 2 ] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" @@ -147,7 +147,7 @@ jobs: # should also be updated. steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/build with: parallelism: 12 @@ -209,7 +209,7 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -226,7 +226,7 @@ jobs: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.')) steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - uses: ./.github/promci/actions/publish_release with: docker_hub_login: ${{ secrets.docker_hub_login 
}} @@ -241,7 +241,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5 + - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6 - name: Install nodejs uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 with: From 6518941d73ad60be8fcf812df6ba0e975dd0450e Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 6 Feb 2025 14:54:03 +0100 Subject: [PATCH 1363/1397] promql: Expose problem with histogram counter resets in subquery Signed-off-by: beorn7 --- promql/promqltest/testdata/subquery.test | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/promql/promqltest/testdata/subquery.test b/promql/promqltest/testdata/subquery.test index 5a5e4e0092..377ee1e5ce 100644 --- a/promql/promqltest/testdata/subquery.test +++ b/promql/promqltest/testdata/subquery.test @@ -134,3 +134,19 @@ eval instant at 20s min_over_time(metric_total[15s:10s]) eval instant at 20m min_over_time(rate(metric_total[5m])[20m:1m]) {} 0.12119047619047618 +clear + +# This native histogram series has a counter reset at 5m. +# TODO(beorn7): Write this more nicely once https://github.com/prometheus/prometheus/issues/15984 is fixed. +load 1m + native_histogram {{sum:100 count:100}} {{sum:103 count:103}} {{sum:106 count:106}} {{sum:109 count:109}} {{sum:112 count:112}} {{sum:3 count:3 counter_reset_hint:reset}} {{sum:6 count:6}}+{{sum:3 count:3}}x5 + +# This sub-query has to detect the counter reset even if it does +# not include the exact sample where the counter reset has happened. +eval instant at 10m increase(native_histogram[10m:3m]) + {} {{count:25 sum:25}} +# This sub-query must not detect the counter reset multiple times +# even though the sample where the counter reset happened is returned +# by the sub-query multiple times. 
+eval instant at 10m increase(native_histogram[10m:15s]) + {} {{count:30.769230769230766 sum:30.769230769230766}} From 14bb63c467963b5923aa01c565ddaffb3058d3b2 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 6 Feb 2025 14:54:37 +0100 Subject: [PATCH 1364/1397] promql: Fix counter reset detection in subqueries with histograms Signed-off-by: beorn7 --- promql/engine.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/promql/engine.go b/promql/engine.go index 3e14a2529d..579fcd1c06 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1548,6 +1548,28 @@ func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr VectorSelector: vs, } for _, s := range mat { + // Set any "NotCounterReset" and "CounterReset" hints in native + // histograms to "UnknownCounterReset" because we might + // otherwise miss a counter reset happening in samples not + // returned by the subquery, or we might over-detect counter + // resets if the sample with a counter reset is returned + // multiple times by a high-res subquery. This intentionally + // does not attempt to be clever (like detecting if we are + // really missing underlying samples or returning underlying + // samples multiple times) because subqueries on counters are + // inherently problematic WRT counter reset handling, so we + // cannot really solve the problem for good. We only want to + // avoid problems that happen due to the explicitly set counter + // reset hints and go back to the behavior we already know from + // float samples. + for i, hp := range s.Histograms { + switch hp.H.CounterResetHint { + case histogram.NotCounterReset, histogram.CounterReset: + h := *hp.H // Shallow copy is sufficient, we only change CounterResetHint. 
+ h.CounterResetHint = histogram.UnknownCounterReset + s.Histograms[i].H = &h + } + } vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, mat.TotalSamples(), ws From 0964b6e5844c4ddcb8d0e22bee3c8f04bc78bdf1 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Wed, 12 Feb 2025 16:46:57 +1100 Subject: [PATCH 1365/1397] promql: emit correct annotation in `quantile_over_time` when run over a range with histograms and floats Signed-off-by: Charles Korn --- promql/functions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/promql/functions.go b/promql/functions.go index 1cb8b2af2d..dcbbbc081c 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -922,7 +922,7 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva } if len(el.Histograms) > 0 { metricName := el.Metric.Get(labels.MetricName) - annos.Add(annotations.NewHistogramIgnoredInAggregationInfo(metricName, args[0].PositionRange())) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { From a944fa1e7af03b7bff0733bbd2ce03558a69e612 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Wed, 12 Feb 2025 15:56:00 +0100 Subject: [PATCH 1366/1397] Make theme switcher a single three-state toggle button (#16000) This should help a bit with the header icon overflow on narrow screens and also overall make things look less cluttered. 
Signed-off-by: Julius Volz --- .../src/components/ThemeSelector.tsx | 73 +++++++------------ 1 file changed, 25 insertions(+), 48 deletions(-) diff --git a/web/ui/mantine-ui/src/components/ThemeSelector.tsx b/web/ui/mantine-ui/src/components/ThemeSelector.tsx index a7f5d1dcaa..964d491a04 100644 --- a/web/ui/mantine-ui/src/components/ThemeSelector.tsx +++ b/web/ui/mantine-ui/src/components/ThemeSelector.tsx @@ -1,14 +1,8 @@ +import { useMantineColorScheme, rem, ActionIcon } from "@mantine/core"; import { - useMantineColorScheme, - SegmentedControl, - rem, - MantineColorScheme, - Tooltip, -} from "@mantine/core"; -import { + IconBrightnessFilled, IconMoonFilled, IconSunFilled, - IconUserFilled, } from "@tabler/icons-react"; import { FC } from "react"; @@ -20,45 +14,28 @@ export const ThemeSelector: FC = () => { }; return ( - setColorScheme(v as MantineColorScheme)} - data={[ - { - value: "light", - label: ( - - - - ), - }, - { - value: "dark", - label: ( - - - - ), - }, - { - value: "auto", - label: ( - - - - ), - }, - ]} - /> + + setColorScheme( + colorScheme === "light" + ? "dark" + : colorScheme === "dark" + ? "auto" + : "light" + ) + } + > + {colorScheme === "light" ? ( + + ) : colorScheme === "dark" ? 
( + + ) : ( + + )} + ); }; From a323c23332954a0355792493ae778f49c288d9a4 Mon Sep 17 00:00:00 2001 From: Gabriel Bernal Date: Wed, 12 Feb 2025 16:08:45 +0100 Subject: [PATCH 1367/1397] set ttlAutopurge to true as ttl is set for codemirror prometheus client cache (#16024) Signed-off-by: Gabriel Bernal --- web/ui/module/codemirror-promql/src/client/prometheus.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/module/codemirror-promql/src/client/prometheus.ts b/web/ui/module/codemirror-promql/src/client/prometheus.ts index 72e34d4ec5..7fce7f1a17 100644 --- a/web/ui/module/codemirror-promql/src/client/prometheus.ts +++ b/web/ui/module/codemirror-promql/src/client/prometheus.ts @@ -294,7 +294,7 @@ class Cache { constructor(config?: CacheConfig) { const maxAge = { ttl: config && config.maxAge ? config.maxAge : 5 * 60 * 1000, - ttlAutopurge: false, + ttlAutopurge: true, }; this.completeAssociation = new LRUCache>>(maxAge); this.metricMetadata = {}; From 00b69efabb3cd3e213a2a34939f745d7156f7d47 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Wed, 12 Feb 2025 16:47:56 +0100 Subject: [PATCH 1368/1397] model/textparse: Change parser interface Metric(...) string to Labels(...) (#16012) * model/textparse: Change parser interface Metric(...) string to Labels(...) Simplified the interface given no one is using the return argument. Renamed for clarity too. Found and discussed https://github.com/prometheus/prometheus/pull/15731#discussion_r1950916842 Signed-off-by: bwplotka * Fixed comments; optimized not needed copy for om and text. 
Signed-off-by: bwplotka --------- Signed-off-by: bwplotka --- cmd/promtool/backfill.go | 4 ++-- model/textparse/benchmark_test.go | 2 +- model/textparse/interface.go | 5 ++--- model/textparse/interface_test.go | 2 +- model/textparse/nhcbparse.go | 12 +++++------- model/textparse/openmetricsparse.go | 10 +++------- model/textparse/promparse.go | 10 +++------- model/textparse/protobufparse.go | 6 ++---- scrape/scrape.go | 2 +- scrape/scrape_test.go | 2 +- web/federate_test.go | 2 +- 11 files changed, 22 insertions(+), 35 deletions(-) diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 125c9a08e6..473ea036ec 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -149,7 +149,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn _, ts, v := p.Series() if ts == nil { l := labels.Labels{} - p.Metric(&l) + p.Labels(&l) return fmt.Errorf("expected timestamp for series %v, got none", l) } if *ts < t { @@ -163,7 +163,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn } l := labels.Labels{} - p.Metric(&l) + p.Labels(&l) lb.Reset(l) for name, value := range customLabels { diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go index 4c5d850930..ce7845ed03 100644 --- a/model/textparse/benchmark_test.go +++ b/model/textparse/benchmark_test.go @@ -203,7 +203,7 @@ func benchParse(b *testing.B, data []byte, parser string) { b.Fatal("not implemented entry", t) } - _ = p.Metric(&res) + p.Labels(&res) _ = p.CreatedTimestamp() for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { } diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 2682855281..58cdf52a03 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -57,11 +57,10 @@ type Parser interface { // The returned byte slice becomes invalid after the next call to Next. 
Comment() []byte - // Metric writes the labels of the current sample into the passed labels. - // It returns the string from which the metric was parsed. + // Labels writes the labels of the current sample into the passed labels. // The values of the "le" labels of classic histograms and "quantile" labels // of summaries should follow the OpenMetrics formatting rules. - Metric(l *labels.Labels) string + Labels(l *labels.Labels) // Exemplar writes the exemplar of the current sample into the passed // exemplar. It can be called repeatedly to retrieve multiple exemplars diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index 72c8284f2d..504a4917d9 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -238,7 +238,7 @@ func testParse(t *testing.T, p Parser) (ret []parsedEntry) { got.m = string(m) } - p.Metric(&got.lset) + p.Labels(&got.lset) // Parser reuses int pointer. if ct := p.CreatedTimestamp(); ct != nil { got.ct = int64p(*ct) diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go index 83e381539f..d8c2317980 100644 --- a/model/textparse/nhcbparse.go +++ b/model/textparse/nhcbparse.go @@ -67,8 +67,7 @@ type NHCBParser struct { h *histogram.Histogram fh *histogram.FloatHistogram // For Metric. - lset labels.Labels - metricString string + lset labels.Labels // For Type. 
bName []byte typ model.MetricType @@ -141,13 +140,12 @@ func (p *NHCBParser) Comment() []byte { return p.parser.Comment() } -func (p *NHCBParser) Metric(l *labels.Labels) string { +func (p *NHCBParser) Labels(l *labels.Labels) { if p.state == stateEmitting { *l = p.lsetNHCB - return p.metricStringNHCB + return } *l = p.lset - return p.metricString } func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { @@ -200,7 +198,7 @@ func (p *NHCBParser) Next() (Entry, error) { switch p.entry { case EntrySeries: p.bytes, p.ts, p.value = p.parser.Series() - p.metricString = p.parser.Metric(&p.lset) + p.parser.Labels(&p.lset) // Check the label set to see if we can continue or need to emit the NHCB. var isNHCB bool if p.compareLabels() { @@ -224,7 +222,7 @@ func (p *NHCBParser) Next() (Entry, error) { return p.entry, p.err case EntryHistogram: p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() - p.metricString = p.parser.Metric(&p.lset) + p.parser.Labels(&p.lset) p.storeExponentialLabels() case EntryType: p.bName, p.typ = p.parser.Type() diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index f0dd51afee..2f4d5ab2c5 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -197,11 +197,9 @@ func (p *OpenMetricsParser) Comment() []byte { return p.text } -// Metric writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. -func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Copy the buffer to a string: this is only necessary for the return value. - s := string(p.series) +// Labels writes the labels of the current sample into the passed labels. 
+func (p *OpenMetricsParser) Labels(l *labels.Labels) { + s := yoloString(p.series) p.builder.Reset() metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) @@ -220,8 +218,6 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { p.builder.Sort() *l = p.builder.Labels() - - return s } // Exemplar writes the exemplar of the current sample into the passed exemplar. diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 17b0c3db8b..844ab2f15d 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -223,11 +223,9 @@ func (p *PromParser) Comment() []byte { return p.text } -// Metric writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. -func (p *PromParser) Metric(l *labels.Labels) string { - // Copy the buffer to a string: this is only necessary for the return value. - s := string(p.series) +// Labels writes the labels of the current sample into the passed labels. +func (p *PromParser) Labels(l *labels.Labels) { + s := yoloString(p.series) p.builder.Reset() metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) @@ -246,8 +244,6 @@ func (p *PromParser) Metric(l *labels.Labels) string { p.builder.Sort() *l = p.builder.Labels() - - return s } // Exemplar implements the Parser interface. However, since the classic diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index a77e1d728f..6d074a2695 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -296,9 +296,9 @@ func (p *ProtobufParser) Comment() []byte { return nil } -// Metric writes the labels of the current sample into the passed labels. +// Labels writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. 
-func (p *ProtobufParser) Metric(l *labels.Labels) string { +func (p *ProtobufParser) Labels(l *labels.Labels) { p.builder.Reset() p.builder.Add(labels.MetricName, p.getMagicName()) @@ -312,8 +312,6 @@ func (p *ProtobufParser) Metric(l *labels.Labels) string { // Sort labels to maintain the sorted labels invariant. p.builder.Sort() *l = p.builder.Labels() - - return p.metricBytes.String() } // Exemplar writes the exemplar of the current sample into the passed diff --git a/scrape/scrape.go b/scrape/scrape.go index 2e95ee2282..58314af4eb 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1714,7 +1714,7 @@ loop: lset = ce.lset hash = ce.hash } else { - p.Metric(&lset) + p.Labels(&lset) hash = lset.Hash() // Hash label set as it is seen local to the target. Then add target labels diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 2185d028fb..ddc9b2853f 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1988,7 +1988,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { var lset labels.Labels p.Next() - p.Metric(&lset) + p.Labels(&lset) hash := lset.Hash() // Create a fake entry in the cache diff --git a/web/federate_test.go b/web/federate_test.go index 056a95d67f..717c0a2356 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -403,7 +403,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { } require.NoError(t, err) if et == textparse.EntryHistogram || et == textparse.EntrySeries { - p.Metric(&l) + p.Labels(&l) } switch et { case textparse.EntryHelp: From 8fe24ced8285a10b22724255d3e2c65d93ce9ebf Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Wed, 12 Feb 2025 17:51:40 +0100 Subject: [PATCH 1369/1397] ci: Add GH action that clicks "enable auto-merge" on dependabot PRs (#15967) This will merge on green all non-conflicting dependabot PRs. 
Signed-off-by: bwplotka --- .github/workflows/automerge-dependabot.yml | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/workflows/automerge-dependabot.yml diff --git a/.github/workflows/automerge-dependabot.yml b/.github/workflows/automerge-dependabot.yml new file mode 100644 index 0000000000..3909f57329 --- /dev/null +++ b/.github/workflows/automerge-dependabot.yml @@ -0,0 +1,30 @@ +--- +name: Dependabot auto-merge +on: pull_request + +concurrency: + group: ${{ github.workflow }}-${{ (github.event.pull_request && github.event.pull_request.number) || github.ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + dependabot: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + if: ${{ github.event.pull_request.user.login == 'dependabot[bot]' && github.repository_owner == 'prometheus' }} + steps: + - name: Dependabot metadata + id: metadata + uses: dependabot/fetch-metadata@d7267f607e9d3fb96fc2fbe83e0af444713e90b7 # v2.3.0 + with: + github-token: "${{ secrets.GITHUB_TOKEN }}" + - name: Enable auto-merge for Dependabot PRs + if: ${{steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch'}} + run: gh pr merge --auto --merge "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} From 509b978f0d675b4c9b3ccf8c0fc06961b0f03e8f Mon Sep 17 00:00:00 2001 From: Aman <95525722+amanycodes@users.noreply.github.com> Date: Wed, 12 Feb 2025 22:44:05 +0530 Subject: [PATCH 1370/1397] single-button-theme-toggle-and-responsive-logo (#16021) Signed-off-by: amanycodes --- web/ui/mantine-ui/src/App.tsx | 6 ++++-- web/ui/mantine-ui/src/components/ThemeSelector.tsx | 10 +++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 3bec30fa31..2599c9f5aa 100644 --- 
a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -332,14 +332,16 @@ function App() { justify="space-between" wrap="nowrap" > - + - Prometheus{agentMode && " Agent"} + Prometheus + Prometheus + {agentMode && "Agent"} diff --git a/web/ui/mantine-ui/src/components/ThemeSelector.tsx b/web/ui/mantine-ui/src/components/ThemeSelector.tsx index 964d491a04..131327a1f6 100644 --- a/web/ui/mantine-ui/src/components/ThemeSelector.tsx +++ b/web/ui/mantine-ui/src/components/ThemeSelector.tsx @@ -1,8 +1,8 @@ import { useMantineColorScheme, rem, ActionIcon } from "@mantine/core"; import { - IconBrightnessFilled, IconMoonFilled, IconSunFilled, + IconBrightnessFilled, } from "@tabler/icons-react"; import { FC } from "react"; @@ -30,11 +30,11 @@ export const ThemeSelector: FC = () => { } > {colorScheme === "light" ? ( - - ) : colorScheme === "dark" ? ( - - ) : ( + ) : colorScheme === "dark" ? ( + + ) : ( + )} ); From 898af6102d19213199a9015bc4d97d109b35616f Mon Sep 17 00:00:00 2001 From: Nicolas Peugnet Date: Thu, 13 Feb 2025 03:03:31 +0100 Subject: [PATCH 1371/1397] promtool: support creating tsdb blocks from a pipe (#16011) This is very useful when piping the input file to stdin and then using /dev/stdin as the input file. e.g. 
xzcat dump.xz | promtool tsdb create-blocks-from openmetrics /dev/stdin /tmp/data Signed-off-by: Nicolas Peugnet --- cmd/promtool/tsdb.go | 20 ++++++++-- cmd/promtool/tsdb_posix_test.go | 69 +++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 cmd/promtool/tsdb_posix_test.go diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index b291d29bf7..6a62e2e8bc 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -826,17 +826,31 @@ func checkErr(err error) int { } func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) int { - inputFile, err := fileutil.OpenMmapFile(path) + var buf []byte + info, err := os.Stat(path) if err != nil { return checkErr(err) } - defer inputFile.Close() + if info.Mode()&(os.ModeNamedPipe|os.ModeCharDevice) != 0 { + // Read the pipe chunks by chunks as it cannot be mmap-ed + buf, err = os.ReadFile(path) + if err != nil { + return checkErr(err) + } + } else { + inputFile, err := fileutil.OpenMmapFile(path) + if err != nil { + return checkErr(err) + } + defer inputFile.Close() + buf = inputFile.Bytes() + } if err := os.MkdirAll(outputDir, 0o777); err != nil { return checkErr(fmt.Errorf("create output dir: %w", err)) } - return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration, customLabels)) + return checkErr(backfill(5000, buf, outputDir, humanReadable, quiet, maxBlockDuration, customLabels)) } func displayHistogram(dataType string, datas []int, total int) { diff --git a/cmd/promtool/tsdb_posix_test.go b/cmd/promtool/tsdb_posix_test.go new file mode 100644 index 0000000000..8a83aead70 --- /dev/null +++ b/cmd/promtool/tsdb_posix_test.go @@ -0,0 +1,69 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows + +package main + +import ( + "bytes" + "io" + "math" + "os" + "path" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/tsdb" +) + +func TestTSDBDumpOpenMetricsRoundTripPipe(t *testing.T) { + initialMetrics, err := os.ReadFile("testdata/dump-openmetrics-roundtrip-test.prom") + require.NoError(t, err) + initialMetrics = normalizeNewLine(initialMetrics) + + pipeDir := t.TempDir() + dbDir := t.TempDir() + + // create pipe + pipe := path.Join(pipeDir, "pipe") + err = syscall.Mkfifo(pipe, 0o666) + require.NoError(t, err) + + go func() { + // open pipe to write + in, err := os.OpenFile(pipe, os.O_WRONLY, os.ModeNamedPipe) + require.NoError(t, err) + defer func() { require.NoError(t, in.Close()) }() + _, err = io.Copy(in, bytes.NewReader(initialMetrics)) + require.NoError(t, err) + }() + + // Import samples from OM format + code := backfillOpenMetrics(pipe, dbDir, false, false, 2*time.Hour, map[string]string{}) + require.Equal(t, 0, code) + db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + // Dump the blocks into OM format + dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) + + // Should get back the initial metrics. 
+ require.Equal(t, string(initialMetrics), dumpedMetrics) +} From 9b4c8f6be28823c604aab50febcd32013aa4212c Mon Sep 17 00:00:00 2001 From: frazou <4470714+Frazew@users.noreply.github.com> Date: Thu, 13 Feb 2025 10:48:33 +0100 Subject: [PATCH 1372/1397] rulefmt: support YAML aliases for Alert/Record/Expr (#14957) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * rulefmt: add tests with YAML aliases for Alert/Record/Expr Altough somewhat discouraged in favour of using proper configuration management tools to generate full YAML, it can still be useful in some situations to use YAML anchors/aliases in rules. The current implementation is however confusing: aliases will work everywhere except on the alert/record name and expr This first commit adds (failing) tests to illustrate the issue, the next one fixes it. The YAML test file is intentionally filled with anchors and aliases. Although this is probably not representative of a real-world use case (which would have less of them), it errs on the safer side. Signed-off-by: François HORTA * rulefmt: support YAML aliases for Alert/Record/Expr This fixes the use of YAML aliases in alert/recording rule names and expressions. A side effect of this change is that the RuleNode YAML type is no longer propagated deeper in the codebase, instead the generic Rule type can now be used. Signed-off-by: François HORTA * rulefmt: Add test for YAML merge combined with aliases Currently this does work, but adding a test for the related functionally here makes sense. 
Signed-off-by: David Leadbeater * rulefmt: Rebase to latest changes Signed-off-by: David Leadbeater --------- Signed-off-by: François HORTA Signed-off-by: David Leadbeater Co-authored-by: David Leadbeater --- cmd/promtool/main.go | 8 +-- model/rulefmt/rulefmt.go | 75 ++++++++++++++---------- model/rulefmt/rulefmt_test.go | 27 +++++++++ model/rulefmt/testdata/test_aliases.yaml | 57 ++++++++++++++++++ rules/manager.go | 8 +-- rules/manager_test.go | 8 +-- 6 files changed, 139 insertions(+), 44 deletions(-) create mode 100644 model/rulefmt/testdata/test_aliases.yaml diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 4275b7b572..00280500ed 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -992,11 +992,11 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType { return duplicates } -func ruleMetric(rule rulefmt.RuleNode) string { - if rule.Alert.Value != "" { - return rule.Alert.Value +func ruleMetric(rule rulefmt.Rule) string { + if rule.Alert != "" { + return rule.Alert } - return rule.Record.Value + return rule.Record } var checkMetricsUsage = strings.TrimSpace(` diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 3fc3fa0437..7b99c5fa33 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -92,7 +92,7 @@ type RuleGroups struct { } type ruleGroups struct { - Groups []yaml.Node `yaml:"groups"` + Groups []ruleGroupNode `yaml:"groups"` } // Validate validates all rules in the rule groups. 
@@ -128,9 +128,9 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { set[g.Name] = struct{}{} for i, r := range g.Rules { - for _, node := range g.Rules[i].Validate() { - var ruleName yaml.Node - if r.Alert.Value != "" { + for _, node := range r.Validate(node.Groups[j].Rules[i]) { + var ruleName string + if r.Alert != "" { ruleName = r.Alert } else { ruleName = r.Record @@ -138,7 +138,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { errs = append(errs, &Error{ Group: g.Name, Rule: i + 1, - RuleName: ruleName.Value, + RuleName: ruleName, Err: node, }) } @@ -154,7 +154,18 @@ type RuleGroup struct { Interval model.Duration `yaml:"interval,omitempty"` QueryOffset *model.Duration `yaml:"query_offset,omitempty"` Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` + Rules []Rule `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` +} + +// ruleGroupNode adds yaml.v3 layer to support line and columns outputs for invalid rule groups. +type ruleGroupNode struct { + yaml.Node + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []ruleNode `yaml:"rules"` Labels map[string]string `yaml:"labels,omitempty"` } @@ -169,8 +180,8 @@ type Rule struct { Annotations map[string]string `yaml:"annotations,omitempty"` } -// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules. -type RuleNode struct { +// ruleNode adds yaml.v3 layer to support line and column outputs for invalid rules. +type ruleNode struct { Record yaml.Node `yaml:"record,omitempty"` Alert yaml.Node `yaml:"alert,omitempty"` Expr yaml.Node `yaml:"expr"` @@ -181,64 +192,64 @@ type RuleNode struct { } // Validate the rule and return a list of encountered errors. 
-func (r *RuleNode) Validate() (nodes []WrappedError) { - if r.Record.Value != "" && r.Alert.Value != "" { +func (r *Rule) Validate(node ruleNode) (nodes []WrappedError) { + if r.Record != "" && r.Alert != "" { nodes = append(nodes, WrappedError{ err: errors.New("only one of 'record' and 'alert' must be set"), - node: &r.Record, - nodeAlt: &r.Alert, + node: &node.Record, + nodeAlt: &node.Alert, }) } - if r.Record.Value == "" && r.Alert.Value == "" { + if r.Record == "" && r.Alert == "" { nodes = append(nodes, WrappedError{ err: errors.New("one of 'record' or 'alert' must be set"), - node: &r.Record, - nodeAlt: &r.Alert, + node: &node.Record, + nodeAlt: &node.Alert, }) } - if r.Expr.Value == "" { + if r.Expr == "" { nodes = append(nodes, WrappedError{ err: errors.New("field 'expr' must be set in rule"), - node: &r.Expr, + node: &node.Expr, }) - } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil { + } else if _, err := parser.ParseExpr(r.Expr); err != nil { nodes = append(nodes, WrappedError{ err: fmt.Errorf("could not parse expression: %w", err), - node: &r.Expr, + node: &node.Expr, }) } - if r.Record.Value != "" { + if r.Record != "" { if len(r.Annotations) > 0 { nodes = append(nodes, WrappedError{ err: errors.New("invalid field 'annotations' in recording rule"), - node: &r.Record, + node: &node.Record, }) } if r.For != 0 { nodes = append(nodes, WrappedError{ err: errors.New("invalid field 'for' in recording rule"), - node: &r.Record, + node: &node.Record, }) } if r.KeepFiringFor != 0 { nodes = append(nodes, WrappedError{ err: errors.New("invalid field 'keep_firing_for' in recording rule"), - node: &r.Record, + node: &node.Record, }) } - if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { + if !model.IsValidMetricName(model.LabelValue(r.Record)) { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value), - node: &r.Record, + err: fmt.Errorf("invalid recording rule name: %s", r.Record), + node: 
&node.Record, }) } // While record is a valid UTF-8 it's common mistake to put PromQL expression in the record name. // Disallow "{}" chars. - if strings.Contains(r.Record.Value, "{") || strings.Contains(r.Record.Value, "}") { + if strings.Contains(r.Record, "{") || strings.Contains(r.Record, "}") { nodes = append(nodes, WrappedError{ - err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record.Value), - node: &r.Record, + err: fmt.Errorf("braces present in the recording rule name; should it be in expr?: %s", r.Record), + node: &node.Record, }) } } @@ -274,8 +285,8 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { // testTemplateParsing checks if the templates used in labels and annotations // of the alerting rules are parsed correctly. -func testTemplateParsing(rl *RuleNode) (errs []error) { - if rl.Alert.Value == "" { +func testTemplateParsing(rl *Rule) (errs []error) { + if rl.Alert == "" { // Not an alerting rule. return errs } @@ -292,7 +303,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { tmpl := template.NewTemplateExpander( context.TODO(), strings.Join(append(defs, text), ""), - "__alert_"+rl.Alert.Value, + "__alert_"+rl.Alert, tmplData, model.Time(timestamp.FromTime(time.Now())), nil, diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index 9e191820c5..e8ac3077bd 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -33,6 +33,33 @@ func TestParseFileSuccess(t *testing.T) { require.Empty(t, errs, "unexpected errors parsing file") } +func TestParseFileSuccessWithAliases(t *testing.T) { + exprString := `sum without(instance) (rate(errors_total[5m])) +/ +sum without(instance) (rate(requests_total[5m])) +` + rgs, errs := ParseFile("testdata/test_aliases.yaml", false) + require.Empty(t, errs, "unexpected errors parsing file") + for _, rg := range rgs.Groups { + require.Equal(t, "HighAlert", rg.Rules[0].Alert) + require.Equal(t, "critical", 
rg.Rules[0].Labels["severity"]) + require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[0].Annotations["description"]) + + require.Equal(t, "new_metric", rg.Rules[1].Record) + + require.Equal(t, "HighAlert", rg.Rules[2].Alert) + require.Equal(t, "critical", rg.Rules[2].Labels["severity"]) + require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[0].Annotations["description"]) + + require.Equal(t, "HighAlert2", rg.Rules[3].Alert) + require.Equal(t, "critical", rg.Rules[3].Labels["severity"]) + + for _, rule := range rg.Rules { + require.Equal(t, exprString, rule.Expr) + } + } +} + func TestParseFileFailure(t *testing.T) { for _, c := range []struct { filename string diff --git a/model/rulefmt/testdata/test_aliases.yaml b/model/rulefmt/testdata/test_aliases.yaml new file mode 100644 index 0000000000..326259b0b6 --- /dev/null +++ b/model/rulefmt/testdata/test_aliases.yaml @@ -0,0 +1,57 @@ +groups: + - name: my-group-name + interval: 30s # defaults to global interval + rules: + - &highalert + alert: &alertname HighAlert + expr: &expr | + sum without(instance) (rate(errors_total[5m])) + / + sum without(instance) (rate(requests_total[5m])) + for: 5m + labels: + severity: &severity critical + annotations: + description: &description "stuff's happening with {{ $.labels.service }}" + + # Mix recording rules in the same list + - record: &recordname "new_metric" + expr: *expr + labels: + abc: edf + uvw: xyz + + - alert: *alertname + expr: *expr + for: 5m + labels: + severity: *severity + annotations: + description: *description + + - <<: *highalert + alert: HighAlert2 + + - name: my-another-name + interval: 30s # defaults to global interval + rules: + - alert: *alertname + expr: *expr + for: 5m + labels: + severity: *severity + annotations: + description: *description + + - record: *recordname + expr: *expr + + - alert: *alertname + expr: *expr + labels: + severity: *severity + annotations: + description: *description + + - <<: 
*highalert + alert: HighAlert2 diff --git a/rules/manager.go b/rules/manager.go index b1d3e8e3d6..6e96df2168 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -324,16 +324,16 @@ func (m *Manager) LoadGroups( rules := make([]Rule, 0, len(rg.Rules)) for _, r := range rg.Rules { - expr, err := m.opts.GroupLoader.Parse(r.Expr.Value) + expr, err := m.opts.GroupLoader.Parse(r.Expr) if err != nil { return nil, []error{fmt.Errorf("%s: %w", fn, err)} } mLabels := FromMaps(rg.Labels, r.Labels) - if r.Alert.Value != "" { + if r.Alert != "" { rules = append(rules, NewAlertingRule( - r.Alert.Value, + r.Alert, expr, time.Duration(r.For), time.Duration(r.KeepFiringFor), @@ -347,7 +347,7 @@ func (m *Manager) LoadGroups( continue } rules = append(rules, NewRecordingRule( - r.Record.Value, + r.Record, expr, mLabels, )) diff --git a/rules/manager_test.go b/rules/manager_test.go index 34b77dd40c..6c32b6d0f1 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -842,7 +842,7 @@ func TestUpdate(t *testing.T) { // Change group rules and reload. for i, g := range rgs.Groups { for j, r := range g.Rules { - rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value)) + rgs.Groups[i].Rules[j].Expr = fmt.Sprintf("%s * 0", r.Expr) } } reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) @@ -869,9 +869,9 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest { rtmp := []rulefmt.Rule{} for _, r := range g.Rules { rtmp = append(rtmp, rulefmt.Rule{ - Record: r.Record.Value, - Alert: r.Alert.Value, - Expr: r.Expr.Value, + Record: r.Record, + Alert: r.Alert, + Expr: r.Expr, For: r.For, Labels: r.Labels, Annotations: r.Annotations, From 733a5e9eb433f2380e4c583ee07ff8bd21f06a8f Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Thu, 13 Feb 2025 11:38:35 +0100 Subject: [PATCH 1373/1397] textparse: Optimized protobuf parser with custom streaming unmarshal. (#15731) * textparse: Optimized protobuf parser with custom streaming decoder. 
Signed-off-by: bwplotka Update model/textparse/protobufparse.go Co-authored-by: George Krajcsovits Signed-off-by: Bartlomiej Plotka Addressing comments. Signed-off-by: bwplotka decoder: reuse histograms and summaries. Signed-off-by: bwplotka optimize help returning (5% of mem utilization). Signed-off-by: bwplotka Apply suggestions from code review Co-authored-by: George Krajcsovits Signed-off-by: Bartlomiej Plotka Update prompb/io/prometheus/client/decoder.go Co-authored-by: George Krajcsovits Signed-off-by: Bartlomiej Plotka Fix build. Signed-off-by: bwplotka * Update model/textparse/protobufparse.go Signed-off-by: Bartlomiej Plotka --------- Signed-off-by: bwplotka Signed-off-by: Bartlomiej Plotka --- model/textparse/interface_test.go | 27 +- model/textparse/nhcbparse_test.go | 2 +- model/textparse/promparse.go | 4 + model/textparse/protobufparse.go | 386 +++++----- model/textparse/protobufparse_test.go | 12 +- prompb/io/prometheus/client/decoder.go | 780 ++++++++++++++++++++ prompb/io/prometheus/client/decoder_test.go | 171 +++++ scrape/scrape_test.go | 1 + 8 files changed, 1183 insertions(+), 200 deletions(-) create mode 100644 prompb/io/prometheus/client/decoder.go create mode 100644 prompb/io/prometheus/client/decoder_test.go diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index 504a4917d9..a5ca12859e 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -210,6 +211,17 @@ func requireEntries(t *testing.T, exp, got []parsedEntry) { t.Helper() testutil.RequireEqualWithOptions(t, exp, got, []cmp.Option{ + // We reuse slices so we sometimes have empty vs nil differences + // we need to ignore with cmpopts.EquateEmpty(). 
+ // However we have to filter out labels, as only + // one comparer per type has to be specified, + // and RequireEqualWithOptions uses + // cmp.Comparer(labels.Equal). + cmp.FilterValues(func(x, y any) bool { + _, xIsLabels := x.(labels.Labels) + _, yIsLabels := y.(labels.Labels) + return !xIsLabels && !yIsLabels + }, cmpopts.EquateEmpty()), cmp.AllowUnexported(parsedEntry{}), }) } @@ -230,15 +242,20 @@ func testParse(t *testing.T, p Parser) (ret []parsedEntry) { case EntryInvalid: t.Fatal("entry invalid not expected") case EntrySeries, EntryHistogram: + var ts *int64 if et == EntrySeries { - m, got.t, got.v = p.Series() - got.m = string(m) + m, ts, got.v = p.Series() } else { - m, got.t, got.shs, got.fhs = p.Histogram() - got.m = string(m) + m, ts, got.shs, got.fhs = p.Histogram() } - + if ts != nil { + // TODO(bwplotka): Change to 0 in the interface for set check to + // avoid pointer mangling. + got.t = int64p(*ts) + } + got.m = string(m) p.Labels(&got.lset) + // Parser reuses int pointer. 
if ct := p.CreatedTimestamp(); ct != nil { got.ct = int64p(*ct) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index 859bcc1cb7..f75c7f5677 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -931,7 +931,7 @@ func createTestPromHistogram() string { return `# HELP test_histogram1 Test histogram 1 # TYPE test_histogram1 histogram test_histogram1_count 175 1234568 -test_histogram1_sum 0.0008280461746287094 1234768 +test_histogram1_sum 0.0008280461746287094 1234568 test_histogram1_bucket{le="-0.0004899999999999998"} 2 1234568 test_histogram1_bucket{le="-0.0003899999999999998"} 4 1234568 test_histogram1_bucket{le="-0.0002899999999999998"} 16 1234568 diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 844ab2f15d..f3f2571b00 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -502,6 +502,10 @@ func yoloString(b []byte) string { return unsafe.String(unsafe.SliceData(b), len(b)) } +func yoloBytes(b string) []byte { + return unsafe.Slice(unsafe.StringData(b), len(b)) +} + func parseFloat(s string) (float64, error) { // Keep to pre-Go 1.13 float formats. if strings.ContainsAny(s, "pP_") { diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index 6d074a2695..380c9918cf 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -15,7 +15,6 @@ package textparse import ( "bytes" - "encoding/binary" "errors" "fmt" "io" @@ -25,7 +24,6 @@ import ( "sync" "unicode/utf8" - "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" "github.com/prometheus/common/model" @@ -45,24 +43,24 @@ var floatFormatBufPool = sync.Pool{ }, } -// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus -// protobuf format and then present it as it if were parsed by a -// Prometheus-2-style text parser. This is only done so that we can easily plug -// in the protobuf format into Prometheus 2. 
For future use (with the final -// format that will be used for native histograms), we have to revisit the -// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing -// could be used in a similar fashion (byte-slice pointers into the raw -// payload), which requires some hand-coded protobuf handling. But the current -// parsers all expect the full series name (metric name plus label pairs) as one -// string, which is not how things are represented in the protobuf format. If -// the re-arrangement work is actually causing problems (which has to be seen), -// that expectation needs to be changed. +// ProtobufParser parses the old Prometheus protobuf format and present it +// as the text-style textparse.Parser interface. +// +// It uses a tailored streaming protobuf dto.MetricStreamingDecoder that +// reuses internal protobuf structs and allows direct unmarshalling to Prometheus +// types like labels. type ProtobufParser struct { - in []byte // The input to parse. - inPos int // Position within the input. - metricPos int // Position within Metric slice. + dec *dto.MetricStreamingDecoder + + // Used for both the string returned by Series and Histogram, as well as, + // metric family for Type, Unit and Help. + entryBytes *bytes.Buffer + + lset labels.Labels + builder labels.ScratchBuilder // Held here to reduce allocations when building Labels. + // fieldPos is the position within a Summary or (legacy) Histogram. -2 - // is the count. -1 is the sum. Otherwise it is the index within + // is the count. -1 is the sum. Otherwise, it is the index within // quantiles/buckets. fieldPos int fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed. @@ -78,27 +76,20 @@ type ProtobufParser struct { // that we have to decode the next MetricFamily. 
state Entry - builder labels.ScratchBuilder // held here to reduce allocations when building Labels - - mf *dto.MetricFamily - // Whether to also parse a classic histogram that is also present as a // native histogram. parseClassicHistograms bool - - // The following are just shenanigans to satisfy the Parser interface. - metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric. } // NewProtobufParser returns a parser for the payload in the byte slice. func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser { return &ProtobufParser{ - in: b, + dec: dto.NewMetricStreamingDecoder(b), + entryBytes: &bytes.Buffer{}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), // TODO(bwplotka): Try base builder. + state: EntryInvalid, - mf: &dto.MetricFamily{}, - metricBytes: &bytes.Buffer{}, parseClassicHistograms: parseClassicHistograms, - builder: labels.NewScratchBuilderWithSymbolTable(st, 16), } } @@ -106,19 +97,18 @@ func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolT // value, the timestamp if set, and the value of the current sample. func (p *ProtobufParser) Series() ([]byte, *int64, float64) { var ( - m = p.mf.GetMetric()[p.metricPos] - ts = m.GetTimestampMs() + ts = &p.dec.TimestampMs // To save memory allocations, never nil. 
v float64 ) - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER: - v = m.GetCounter().GetValue() + v = p.dec.GetCounter().GetValue() case dto.MetricType_GAUGE: - v = m.GetGauge().GetValue() + v = p.dec.GetGauge().GetValue() case dto.MetricType_UNTYPED: - v = m.GetUntyped().GetValue() + v = p.dec.GetUntyped().GetValue() case dto.MetricType_SUMMARY: - s := m.GetSummary() + s := p.dec.GetSummary() switch p.fieldPos { case -2: v = float64(s.GetSampleCount()) @@ -133,7 +123,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { } case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // This should only happen for a classic histogram. - h := m.GetHistogram() + h := p.dec.GetHistogram() switch p.fieldPos { case -2: v = h.GetSampleCountFloat() @@ -159,8 +149,8 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { default: panic("encountered unexpected metric type, this is a bug") } - if ts != 0 { - return p.metricBytes.Bytes(), &ts, v + if *ts != 0 { + return p.entryBytes.Bytes(), ts, v } // TODO(beorn7): We assume here that ts==0 means no timestamp. That's // not true in general, but proto3 originally has no distinction between @@ -171,7 +161,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { // away from gogo-protobuf to an actively maintained protobuf // implementation. Once that's done, we can simply use the `optional` // keyword and check for the unset state explicitly. - return p.metricBytes.Bytes(), nil, v + return p.entryBytes.Bytes(), nil, v } // Histogram returns the bytes of a series with a native histogram as a value, @@ -186,47 +176,56 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { // value. func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { var ( - m = p.mf.GetMetric()[p.metricPos] - ts = m.GetTimestampMs() - h = m.GetHistogram() + ts = &p.dec.TimestampMs // To save memory allocations, never nil. 
+ h = p.dec.GetHistogram() ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { p.redoClassic = true } if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { // It is a float histogram. fh := histogram.FloatHistogram{ - Count: h.GetSampleCountFloat(), - Sum: h.GetSampleSum(), - ZeroThreshold: h.GetZeroThreshold(), - ZeroCount: h.GetZeroCountFloat(), - Schema: h.GetSchema(), + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + + // Decoder reuses slices, so we need to copy. PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), - PositiveBuckets: h.GetPositiveCount(), + PositiveBuckets: make([]float64, len(h.GetPositiveCount())), NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), - NegativeBuckets: h.GetNegativeCount(), + NegativeBuckets: make([]float64, len(h.GetNegativeCount())), } for i, span := range h.GetPositiveSpan() { fh.PositiveSpans[i].Offset = span.GetOffset() fh.PositiveSpans[i].Length = span.GetLength() } + for i, cnt := range h.GetPositiveCount() { + fh.PositiveBuckets[i] = cnt + } for i, span := range h.GetNegativeSpan() { fh.NegativeSpans[i].Offset = span.GetOffset() fh.NegativeSpans[i].Length = span.GetLength() } - if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + for i, cnt := range h.GetNegativeCount() { + fh.NegativeBuckets[i] = cnt + } + if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM { fh.CounterResetHint = histogram.GaugeType } fh.Compact(0) - if ts != 0 { - return p.metricBytes.Bytes(), &ts, nil, &fh + if *ts != 0 { + return p.entryBytes.Bytes(), ts, nil, &fh } // Nasty hack: Assume that ts==0 means no timestamp. That's not true in // general, but proto3 has no distinction between unset and // default. Need to avoid in the final format. - return p.metricBytes.Bytes(), nil, nil, &fh + return p.entryBytes.Bytes(), nil, nil, &fh } + // TODO(bwplotka): Create sync.Pool for those structs. 
sh := histogram.Histogram{ Count: h.GetSampleCount(), Sum: h.GetSampleSum(), @@ -234,41 +233,47 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his ZeroCount: h.GetZeroCount(), Schema: h.GetSchema(), PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), - PositiveBuckets: h.GetPositiveDelta(), + PositiveBuckets: make([]int64, len(h.GetPositiveDelta())), NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), - NegativeBuckets: h.GetNegativeDelta(), + NegativeBuckets: make([]int64, len(h.GetNegativeDelta())), } for i, span := range h.GetPositiveSpan() { sh.PositiveSpans[i].Offset = span.GetOffset() sh.PositiveSpans[i].Length = span.GetLength() } + for i, cnt := range h.GetPositiveDelta() { + sh.PositiveBuckets[i] = cnt + } for i, span := range h.GetNegativeSpan() { sh.NegativeSpans[i].Offset = span.GetOffset() sh.NegativeSpans[i].Length = span.GetLength() } - if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + for i, cnt := range h.GetNegativeDelta() { + sh.NegativeBuckets[i] = cnt + } + if p.dec.GetType() == dto.MetricType_GAUGE_HISTOGRAM { sh.CounterResetHint = histogram.GaugeType } sh.Compact(0) - if ts != 0 { - return p.metricBytes.Bytes(), &ts, &sh, nil + if *ts != 0 { + return p.entryBytes.Bytes(), ts, &sh, nil } - return p.metricBytes.Bytes(), nil, &sh, nil + return p.entryBytes.Bytes(), nil, &sh, nil } // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. func (p *ProtobufParser) Help() ([]byte, []byte) { - return p.metricBytes.Bytes(), []byte(p.mf.GetHelp()) + return p.entryBytes.Bytes(), yoloBytes(p.dec.GetHelp()) } // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. 
func (p *ProtobufParser) Type() ([]byte, model.MetricType) { - n := p.metricBytes.Bytes() - switch p.mf.GetType() { + n := p.entryBytes.Bytes() + switch p.dec.GetType() { case dto.MetricType_COUNTER: return n, model.MetricTypeCounter case dto.MetricType_GAUGE: @@ -287,7 +292,7 @@ func (p *ProtobufParser) Type() ([]byte, model.MetricType) { // Must only be called after Next returned a unit entry. // The returned byte slices become invalid after the next call to Next. func (p *ProtobufParser) Unit() ([]byte, []byte) { - return p.metricBytes.Bytes(), []byte(p.mf.GetUnit()) + return p.entryBytes.Bytes(), []byte(p.dec.GetUnit()) } // Comment always returns nil because comments aren't supported by the protobuf @@ -297,21 +302,8 @@ func (p *ProtobufParser) Comment() []byte { } // Labels writes the labels of the current sample into the passed labels. -// It returns the string from which the metric was parsed. func (p *ProtobufParser) Labels(l *labels.Labels) { - p.builder.Reset() - p.builder.Add(labels.MetricName, p.getMagicName()) - - for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { - p.builder.Add(lp.GetName(), lp.GetValue()) - } - if needed, name, value := p.getMagicLabel(); needed { - p.builder.Add(name, value) - } - - // Sort labels to maintain the sorted labels invariant. - p.builder.Sort() - *l = p.builder.Labels() + *l = p.lset.Copy() } // Exemplar writes the exemplar of the current sample into the passed @@ -324,15 +316,14 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // We only ever return one exemplar per (non-native-histogram) series. 
return false } - m := p.mf.GetMetric()[p.metricPos] var exProto *dto.Exemplar - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER: - exProto = m.GetCounter().GetExemplar() + exProto = p.dec.GetCounter().GetExemplar() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: isClassic := p.state == EntrySeries - if !isClassic && len(m.GetHistogram().GetExemplars()) > 0 { - exs := m.GetHistogram().GetExemplars() + if !isClassic && len(p.dec.GetHistogram().GetExemplars()) > 0 { + exs := p.dec.GetHistogram().GetExemplars() for p.exemplarPos < len(exs) { exProto = exs[p.exemplarPos] p.exemplarPos++ @@ -344,7 +335,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return false } } else { - bb := m.GetHistogram().GetBucket() + bb := p.dec.GetHistogram().GetBucket() if p.fieldPos < 0 { if isClassic { return false // At _count or _sum. @@ -392,13 +383,13 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { // invalid (as timestamp e.g. negative value) on counters, summaries or histograms. func (p *ProtobufParser) CreatedTimestamp() *int64 { var ct *types.Timestamp - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER: - ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + ct = p.dec.GetCounter().GetCreatedTimestamp() case dto.MetricType_SUMMARY: - ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + ct = p.dec.GetSummary().GetCreatedTimestamp() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + ct = p.dec.GetHistogram().GetCreatedTimestamp() default: } ctAsTime, err := types.TimestampFromProto(ct) @@ -416,31 +407,34 @@ func (p *ProtobufParser) CreatedTimestamp() *int64 { func (p *ProtobufParser) Next() (Entry, error) { p.exemplarReturned = false switch p.state { + // Invalid state occurs on: + // * First Next() call. 
+ // * Recursive call that tells Next to move to the next metric family. case EntryInvalid: - p.metricPos = 0 p.exemplarPos = 0 p.fieldPos = -2 - n, err := readDelimited(p.in[p.inPos:], p.mf) - p.inPos += n - if err != nil { + + if err := p.dec.NextMetricFamily(); err != nil { return p.state, err } - - // Skip empty metric families. - if len(p.mf.GetMetric()) == 0 { - return p.Next() + if err := p.dec.NextMetric(); err != nil { + // Skip empty metric families. + if errors.Is(err, io.EOF) { + return p.Next() + } + return EntryInvalid, err } // We are at the beginning of a metric family. Put only the name - // into metricBytes and validate only name, help, and type for now. - name := p.mf.GetName() + // into entryBytes and validate only name, help, and type for now. + name := p.dec.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { return EntryInvalid, fmt.Errorf("invalid metric name: %s", name) } - if help := p.mf.GetHelp(); !utf8.ValidString(help) { + if help := p.dec.GetHelp(); !utf8.ValidString(help) { return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help) } - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_COUNTER, dto.MetricType_GAUGE, dto.MetricType_HISTOGRAM, @@ -449,11 +443,11 @@ func (p *ProtobufParser) Next() (Entry, error) { dto.MetricType_UNTYPED: // All good. 
default: - return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.dec.GetType()) } - unit := p.mf.GetUnit() + unit := p.dec.GetUnit() if len(unit) > 0 { - if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + if p.dec.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' { return EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name) } @@ -461,12 +455,11 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name) } } - p.metricBytes.Reset() - p.metricBytes.WriteString(name) - + p.entryBytes.Reset() + p.entryBytes.WriteString(name) p.state = EntryHelp case EntryHelp: - if p.mf.Unit != "" { + if p.dec.Unit != "" { p.state = EntryUnit } else { p.state = EntryType @@ -474,48 +467,78 @@ func (p *ProtobufParser) Next() (Entry, error) { case EntryUnit: p.state = EntryType case EntryType: - t := p.mf.GetType() + t := p.dec.GetType() if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && - isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + isNativeHistogram(p.dec.GetHistogram()) { p.state = EntryHistogram } else { p.state = EntrySeries } - if err := p.updateMetricBytes(); err != nil { + if err := p.onSeriesOrHistogramUpdate(); err != nil { return EntryInvalid, err } - case EntryHistogram, EntrySeries: - if p.redoClassic { - p.redoClassic = false - p.state = EntrySeries - p.fieldPos = -3 - p.fieldsDone = false - } - t := p.mf.GetType() - if p.state == EntrySeries && !p.fieldsDone && - (t == dto.MetricType_SUMMARY || - t == dto.MetricType_HISTOGRAM || - t == dto.MetricType_GAUGE_HISTOGRAM) { - p.fieldPos++ - } else { - p.metricPos++ + case EntrySeries: + // Potentially 
a second series in the metric family. + t := p.dec.GetType() + if t == dto.MetricType_SUMMARY || + t == dto.MetricType_HISTOGRAM || + t == dto.MetricType_GAUGE_HISTOGRAM { + // Non-trivial series (complex metrics, with magic suffixes). + + // Did we iterate over all the classic representations fields? + // NOTE: p.fieldsDone is updated on p.onSeriesOrHistogramUpdate. + if !p.fieldsDone { + // Still some fields to iterate over. + p.fieldPos++ + if err := p.onSeriesOrHistogramUpdate(); err != nil { + return EntryInvalid, err + } + return p.state, nil + } + + // Reset histogram fields. p.fieldPos = -2 p.fieldsDone = false p.exemplarPos = 0 + // If this is a metric family containing native - // histograms, we have to switch back to native - // histograms after parsing a classic histogram. - if p.state == EntrySeries && - (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && - isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { + // histograms, it means we are here thanks to redoClassic state. + // Return to native histograms for the consistent flow. + if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && + isNativeHistogram(p.dec.GetHistogram()) { p.state = EntryHistogram } } - if p.metricPos >= len(p.mf.GetMetric()) { - p.state = EntryInvalid - return p.Next() + // Is there another series? + if err := p.dec.NextMetric(); err != nil { + if errors.Is(err, io.EOF) { + p.state = EntryInvalid + return p.Next() + } + return EntryInvalid, err } - if err := p.updateMetricBytes(); err != nil { + if err := p.onSeriesOrHistogramUpdate(); err != nil { + return EntryInvalid, err + } + case EntryHistogram: + // Was Histogram() called and parseClassicHistograms is true? + if p.redoClassic { + p.redoClassic = false + p.fieldPos = -3 + p.fieldsDone = false + p.state = EntrySeries + return p.Next() // Switch to classic histogram. + } + + // Is there another series? 
+ if err := p.dec.NextMetric(); err != nil { + if errors.Is(err, io.EOF) { + p.state = EntryInvalid + return p.Next() + } + return EntryInvalid, err + } + if err := p.onSeriesOrHistogramUpdate(); err != nil { return EntryInvalid, err } default: @@ -524,30 +547,39 @@ func (p *ProtobufParser) Next() (Entry, error) { return p.state, nil } -func (p *ProtobufParser) updateMetricBytes() error { - b := p.metricBytes - b.Reset() - b.WriteString(p.getMagicName()) - for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { - b.WriteByte(model.SeparatorByte) - n := lp.GetName() - if !model.LabelName(n).IsValid() { - return fmt.Errorf("invalid label name: %s", n) - } - b.WriteString(n) - b.WriteByte(model.SeparatorByte) - v := lp.GetValue() - if !utf8.ValidString(v) { - return fmt.Errorf("invalid label value: %s", v) - } - b.WriteString(v) +// onSeriesOrHistogramUpdate updates internal state before returning +// a series or histogram. It updates: +// * p.lset. +// * p.entryBytes. +// * p.fieldsDone depending on p.fieldPos. +func (p *ProtobufParser) onSeriesOrHistogramUpdate() error { + p.builder.Reset() + p.builder.Add(labels.MetricName, p.getMagicName()) + + if err := p.dec.Label(&p.builder); err != nil { + return err } - if needed, n, v := p.getMagicLabel(); needed { - b.WriteByte(model.SeparatorByte) - b.WriteString(n) - b.WriteByte(model.SeparatorByte) - b.WriteString(v) + + if needed, name, value := p.getMagicLabel(); needed { + p.builder.Add(name, value) } + + // Sort labels to maintain the sorted labels invariant. + p.builder.Sort() + p.builder.Overwrite(&p.lset) + + // entryBytes has to be unique for each series. 
+ p.entryBytes.Reset() + p.lset.Range(func(l labels.Label) { + if l.Name == labels.MetricName { + p.entryBytes.WriteString(l.Value) + return + } + p.entryBytes.WriteByte(model.SeparatorByte) + p.entryBytes.WriteString(l.Name) + p.entryBytes.WriteByte(model.SeparatorByte) + p.entryBytes.WriteString(l.Value) + }) return nil } @@ -555,36 +587,37 @@ func (p *ProtobufParser) updateMetricBytes() error { // ("_count", "_sum", "_bucket") if needed according to the current parser // state. func (p *ProtobufParser) getMagicName() string { - t := p.mf.GetType() + t := p.dec.GetType() if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) { - return p.mf.GetName() + return p.dec.GetName() } if p.fieldPos == -2 { - return p.mf.GetName() + "_count" + return p.dec.GetName() + "_count" } if p.fieldPos == -1 { - return p.mf.GetName() + "_sum" + return p.dec.GetName() + "_sum" } if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { - return p.mf.GetName() + "_bucket" + return p.dec.GetName() + "_bucket" } - return p.mf.GetName() + return p.dec.GetName() } // getMagicLabel returns if a magic label ("quantile" or "le") is needed and, if // so, its name and value. It also sets p.fieldsDone if applicable. func (p *ProtobufParser) getMagicLabel() (bool, string, string) { + // Native histogram or _count and _sum series. 
if p.state == EntryHistogram || p.fieldPos < 0 { return false, "", "" } - switch p.mf.GetType() { + switch p.dec.GetType() { case dto.MetricType_SUMMARY: - qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile() + qq := p.dec.GetSummary().GetQuantile() q := qq[p.fieldPos] p.fieldsDone = p.fieldPos == len(qq)-1 return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile()) case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket() + bb := p.dec.GetHistogram().GetBucket() if p.fieldPos >= len(bb) { p.fieldsDone = true return true, model.BucketLabel, "+Inf" @@ -596,29 +629,6 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) { return false, "", "" } -var errInvalidVarint = errors.New("protobufparse: invalid varint encountered") - -// readDelimited is essentially doing what the function of the same name in -// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is -// specific to a MetricFamily, utilizes the more efficient gogo-protobuf -// unmarshaling, and acts on a byte slice directly without any additional -// staging buffers. -func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { - if len(b) == 0 { - return 0, io.EOF - } - messageLength, varIntLength := proto.DecodeVarint(b) - if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 { - return 0, errInvalidVarint - } - totalLength := varIntLength + int(messageLength) - if totalLength > len(b) { - return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) - } - mf.Reset() - return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) -} - // formatOpenMetricsFloat works like the usual Go string formatting of a float // but appends ".0" if the resulting number would otherwise contain neither a // "." nor an "e". 
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index 065459a69a..381e6a9355 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -1246,7 +1246,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + m: "rpc_durations_seconds\xffquantile\xff0.5\xffservice\xffexponential", v: 6.442786329648548e-07, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1255,7 +1255,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + m: "rpc_durations_seconds\xffquantile\xff0.9\xffservice\xffexponential", v: 1.9435742936658396e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -1264,7 +1264,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + m: "rpc_durations_seconds\xffquantile\xff0.99\xffservice\xffexponential", v: 4.0471608667037015e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -2199,7 +2199,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + m: "rpc_durations_seconds\xffquantile\xff0.5\xffservice\xffexponential", v: 6.442786329648548e-07, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -2208,7 +2208,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + m: "rpc_durations_seconds\xffquantile\xff0.9\xffservice\xffexponential", v: 1.9435742936658396e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", @@ -2217,7 +2217,7 @@ func TestProtobufParse(t *testing.T) { ), }, { - m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + m: "rpc_durations_seconds\xffquantile\xff0.99\xffservice\xffexponential", v: 
4.0471608667037015e-06, lset: labels.FromStrings( "__name__", "rpc_durations_seconds", diff --git a/prompb/io/prometheus/client/decoder.go b/prompb/io/prometheus/client/decoder.go new file mode 100644 index 0000000000..b21f78cc9c --- /dev/null +++ b/prompb/io/prometheus/client/decoder.go @@ -0,0 +1,780 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package io_prometheus_client //nolint:revive + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "unicode/utf8" + "unsafe" + + proto "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/labels" +) + +type MetricStreamingDecoder struct { + in []byte + inPos int + + // TODO(bwplotka): Switch to generator/plugin that won't have those fields accessible e.g. OpaqueAPI + // We leverage the fact those two don't collide. + *MetricFamily // Without Metric, guarded by overridden GetMetric method. + *Metric // Without Label, guarded by overridden GetLabel method. + + mfData []byte + metrics []pos + metricIndex int + + mData []byte + labels []pos +} + +// NewMetricStreamingDecoder returns a Go iterator that unmarshals given protobuf bytes one +// metric family and metric at the time, allowing efficient streaming. +// +// Do not modify MetricStreamingDecoder between iterations as it's reused to save allocations. 
+// GetGauge, GetCounter, etc are also cached, which means GetGauge will work for counter +// if previously gauge was parsed. It's up to the caller to use Type to decide what +// method to use when checking the value. +// +// TODO(bwplotka): io.Reader approach is possible too, but textparse has access to whole scrape for now. +func NewMetricStreamingDecoder(data []byte) *MetricStreamingDecoder { + return &MetricStreamingDecoder{ + in: data, + MetricFamily: &MetricFamily{}, + Metric: &Metric{}, + metrics: make([]pos, 0, 100), + } +} + +var errInvalidVarint = errors.New("clientpb: invalid varint encountered") + +func (m *MetricStreamingDecoder) NextMetricFamily() error { + b := m.in[m.inPos:] + if len(b) == 0 { + return io.EOF + } + messageLength, varIntLength := proto.DecodeVarint(b) // TODO(bwplotka): Get rid of gogo. + if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 { + return errInvalidVarint + } + totalLength := varIntLength + int(messageLength) + if totalLength > len(b) { + return fmt.Errorf("clientpb: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + } + m.resetMetricFamily() + m.mfData = b[varIntLength:totalLength] + + m.inPos += totalLength + return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData) +} + +// resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory. +func (m *MetricStreamingDecoder) resetMetricFamily() { + m.metrics = m.metrics[:0] + m.metricIndex = 0 + m.MetricFamily.Reset() +} + +func (m *MetricStreamingDecoder) NextMetric() error { + if m.metricIndex >= len(m.metrics) { + return io.EOF + } + + m.resetMetric() + m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end] + if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil { + return err + } + m.metricIndex++ + return nil +} + +// resetMetric resets all the fields in m to equal the zero value, but re-using slices memory. 
+func (m *MetricStreamingDecoder) resetMetric() { + m.labels = m.labels[:0] + m.TimestampMs = 0 + + // TODO(bwplotka): Autogenerate reset functions. + if m.Metric.Counter != nil { + m.Metric.Counter.Value = 0 + m.Metric.Counter.CreatedTimestamp = nil + m.Metric.Counter.Exemplar = nil + } + if m.Metric.Gauge != nil { + m.Metric.Gauge.Value = 0 + } + if m.Metric.Histogram != nil { + m.Metric.Histogram.SampleCount = 0 + m.Metric.Histogram.SampleCountFloat = 0 + m.Metric.Histogram.SampleSum = 0 + m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0] + m.Metric.Histogram.CreatedTimestamp = nil + m.Metric.Histogram.Schema = 0 + m.Metric.Histogram.ZeroThreshold = 0 + m.Metric.Histogram.ZeroCount = 0 + m.Metric.Histogram.ZeroCountFloat = 0 + m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0] + m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0] + m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0] + m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0] + m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0] + m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0] + m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0] + } + if m.Metric.Summary != nil { + m.Metric.Summary.SampleCount = 0 + m.Metric.Summary.SampleSum = 0 + m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0] + m.Metric.Summary.CreatedTimestamp = nil + } +} + +func (m *MetricStreamingDecoder) GetMetric() { + panic("don't use GetMetric, use Metric directly") +} + +func (m *MetricStreamingDecoder) GetLabel() { + panic("don't use GetLabel, use Label instead") +} + +// Label parses labels into labels scratch builder. Metric name is missing +// given the protobuf metric model and has to be deduced from the metric family name. +// TODO: The method name intentionally hide MetricStreamingDecoder.Metric.Label +// field to avoid direct use (it's not parsed). 
In future generator will generate +// structs tailored for streaming decoding. +func (m *MetricStreamingDecoder) Label(b *labels.ScratchBuilder) error { + for _, l := range m.labels { + if err := parseLabel(m.mData[l.start:l.end], b); err != nil { + return err + } + } + return nil +} + +// parseLabels is essentially LabelPair.Unmarshal but directly adding into scratch builder +// and reusing strings. +func parseLabel(dAtA []byte, b *labels.ScratchBuilder) error { + var name, value string + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + name = yoloString(dAtA[iNdEx:postIndex]) + if !model.LabelName(name).IsValid() { + return fmt.Errorf("invalid label name: %s", name) + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + value = yoloString(dAtA[iNdEx:postIndex]) + if !utf8.ValidString(value) { + return fmt.Errorf("invalid label value: %s", value) + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if iNdEx > l { + return io.ErrUnexpectedEOF + } + b.Add(name, value) + return nil +} + +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +type pos struct { + start, end int +} + +func (m *Metric) unmarshalWithoutLabels(p *MetricStreamingDecoder, dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + p.labels = append(p.labels, pos{start: iNdEx, end: postIndex}) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *MetricFamily) unmarshalWithoutMetrics(buf *MetricStreamingDecoder, dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return errors.New("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + buf.metrics = 
append(buf.metrics, pos{start: iNdEx, end: postIndex}) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = yoloString(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/prompb/io/prometheus/client/decoder_test.go b/prompb/io/prometheus/client/decoder_test.go new file mode 100644 index 0000000000..8697b78fca --- /dev/null +++ b/prompb/io/prometheus/client/decoder_test.go @@ -0,0 +1,171 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package io_prometheus_client //nolint:revive + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/labels" +) + +const ( + testGauge = `name: "go_build_info" +help: "Build information about the main Go module." +type: GAUGE +metric: < + label: < + name: "checksum" + value: "" + > + label: < + name: "path" + value: "github.com/prometheus/client_golang" + > + label: < + name: "version" + value: "(devel)" + > + gauge: < + value: 1 + > +> +metric: < + label: < + name: "checksum" + value: "" + > + label: < + name: "path" + value: "github.com/prometheus/prometheus" + > + label: < + name: "version" + value: "v3.0.0" + > + gauge: < + value: 2 + > +> + +` + testCounter = `name: "go_memstats_alloc_bytes_total" +help: "Total number of bytes allocated, even if freed." +type: COUNTER +unit: "bytes" +metric: < + counter: < + value: 1.546544e+06 + exemplar: < + label: < + name: "dummyID" + value: "42" + > + value: 12 + timestamp: < + seconds: 1625851151 + nanos: 233181499 + > + > + > +> + +` +) + +func TestMetricStreamingDecoder(t *testing.T) { + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := bytes.Buffer{} + for _, m := range []string{testGauge, testCounter} { + mf := &MetricFamily{} + require.NoError(t, proto.UnmarshalText(m, mf)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(mf) + require.NoError(t, err) + + // Write first length, then binary protobuf. 
+ varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + d := NewMetricStreamingDecoder(buf.Bytes()) + require.NoError(t, d.NextMetricFamily()) + nextFn := func() error { + for { + err := d.NextMetric() + if errors.Is(err, io.EOF) { + if err := d.NextMetricFamily(); err != nil { + return err + } + continue + } + return err + } + } + + var firstMetricLset labels.Labels + { + require.NoError(t, nextFn()) + + require.Equal(t, "go_build_info", d.GetName()) + require.Equal(t, "Build information about the main Go module.", d.GetHelp()) + require.Equal(t, MetricType_GAUGE, d.GetType()) + + require.Equal(t, float64(1), d.GetGauge().GetValue()) + b := labels.NewScratchBuilder(0) + require.NoError(t, d.Label(&b)) + + firstMetricLset = b.Labels() + + require.Equal(t, `{checksum="", path="github.com/prometheus/client_golang", version="(devel)"}`, firstMetricLset.String()) + } + + { + require.NoError(t, nextFn()) + + require.Equal(t, "go_build_info", d.GetName()) + require.Equal(t, "Build information about the main Go module.", d.GetHelp()) + require.Equal(t, MetricType_GAUGE, d.GetType()) + + require.Equal(t, float64(2), d.GetGauge().GetValue()) + b := labels.NewScratchBuilder(0) + require.NoError(t, d.Label(&b)) + require.Equal(t, `{checksum="", path="github.com/prometheus/prometheus", version="v3.0.0"}`, b.Labels().String()) + } + { + // Different mf now. 
+ require.NoError(t, nextFn()) + + require.Equal(t, "go_memstats_alloc_bytes_total", d.GetName()) + require.Equal(t, "Total number of bytes allocated, even if freed.", d.GetHelp()) + require.Equal(t, "bytes", d.GetUnit()) + require.Equal(t, MetricType_COUNTER, d.GetType()) + + require.Equal(t, 1.546544e+06, d.Metric.GetCounter().GetValue()) + b := labels.NewScratchBuilder(0) + require.NoError(t, d.Label(&b)) + require.Equal(t, `{}`, b.Labels().String()) + } + require.Equal(t, io.EOF, nextFn()) + + // Expect labels and metricBytes to be static and reusable even after parsing. + require.Equal(t, `{checksum="", path="github.com/prometheus/client_golang", version="(devel)"}`, firstMetricLset.String()) +} diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index ddc9b2853f..b30dbfa1b9 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1895,6 +1895,7 @@ func TestScrapeLoopAppend(t *testing.T) { } func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { + t.Helper() testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.Comparer(equalFloatSamples), cmp.AllowUnexported(histogramSample{})}, msgAndArgs...) From de23a9667cc670b26d8afdba8e3fa48a00e74ec0 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Thu, 13 Feb 2025 13:16:33 +0100 Subject: [PATCH 1374/1397] prw2: Split PRW2.0 from metadata-wal-records feature (#16030) Rationales: * metadata-wal-records might be deprecated and replaced going forward: https://github.com/prometheus/prometheus/issues/15911 * PRW 2.0 works without metadata just fine (although it sends untyped metrics as expected). 
Signed-off-by: bwplotka --- cmd/prometheus/main.go | 4 ++-- docs/feature_flags.md | 3 +-- storage/remote/queue_manager_test.go | 6 +++--- storage/remote/read_test.go | 2 +- storage/remote/storage.go | 4 ++-- storage/remote/storage_test.go | 10 +++++----- storage/remote/write.go | 8 +------- storage/remote/write_test.go | 14 +++++++------- tsdb/agent/db_test.go | 6 +++--- web/api/v1/api_test.go | 2 +- 10 files changed, 26 insertions(+), 33 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 3c2b5ee0c1..d69648d88b 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -231,7 +231,7 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { logger.Info("Experimental additional scrape metrics enabled") case "metadata-wal-records": c.scrape.AppendMetadata = true - logger.Info("Experimental metadata records in WAL enabled, required for remote write 2.0") + logger.Info("Experimental metadata records in WAL enabled") case "promql-per-step-stats": c.enablePerStepStats = true logger.Info("Experimental per-step statistics reporting") @@ -699,7 +699,7 @@ func main() { var ( localStorage = &readyStorage{stats: tsdb.NewDBStats()} scraper = &readyScrapeManager{} - remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) + remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 2a5a9263f5..6973d6d73b 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -114,8 +114,7 @@ Fall back to serving the old (Prometheus 2.x) web UI instead of the new UI. 
The When enabled, Prometheus will store metadata in-memory and keep track of metadata changes as WAL records on a per-series basis. -This must be used if -you are also using remote write 2.0 as it will only gather metadata from the WAL. +This must be used if you would like to send metadata using the new remote write 2.0. ## Delay compaction start time diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 38eda81d97..27d5aa47b6 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -135,7 +135,7 @@ func TestBasicContentNegotiation(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) defer s.Close() var ( @@ -243,7 +243,7 @@ func TestSampleDelivery(t *testing.T) { } { t.Run(fmt.Sprintf("%s-%s", tc.protoMsg, tc.name), func(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) defer s.Close() var ( @@ -362,7 +362,7 @@ func TestMetadataDelivery(t *testing.T) { func TestWALMetadataDelivery(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) defer s.Close() cfg := config.DefaultQueueConfig diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index b78a8c6215..8073f23b3b 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -93,7 +93,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) { for _, tc := range cases { t.Run("", func(t *testing.T) { - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteReadConfigs: 
tc.cfgs, diff --git a/storage/remote/storage.go b/storage/remote/storage.go index 14c3c87d93..ba6d100bdf 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -64,7 +64,7 @@ type Storage struct { } // NewStorage returns a remote.Storage. -func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { +func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage { if l == nil { l = promslog.NewNopLogger() } @@ -76,7 +76,7 @@ func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeC deduper: deduper, localStartTimeCallback: stCallback, } - s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL) + s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm) return s } diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go index 8c97d870e9..a62cd2da39 100644 --- a/storage/remote/storage_test.go +++ b/storage/remote/storage_test.go @@ -29,7 +29,7 @@ import ( func TestStorageLifecycle(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ @@ -56,7 +56,7 @@ func TestStorageLifecycle(t *testing.T) { func TestUpdateRemoteReadConfigs(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, @@ -77,7 +77,7 @@ func TestUpdateRemoteReadConfigs(t *testing.T) { func TestFilterExternalLabels(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, 
defaultFlushDeadline, nil, false) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.GlobalConfig{ @@ -102,7 +102,7 @@ func TestFilterExternalLabels(t *testing.T) { func TestIgnoreExternalLabels(t *testing.T) { dir := t.TempDir() - s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.GlobalConfig{ @@ -154,7 +154,7 @@ func baseRemoteReadConfig(host string) *config.RemoteReadConfig { // ApplyConfig runs concurrently with Notify // See https://github.com/prometheus/prometheus/issues/12747 func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) { - s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil, false) + s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil) var wg sync.WaitGroup wg.Add(2000) diff --git a/storage/remote/write.go b/storage/remote/write.go index 0363095444..51daeedb72 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -15,7 +15,6 @@ package remote import ( "context" - "errors" "fmt" "log/slog" "math" @@ -67,7 +66,6 @@ type WriteStorage struct { externalLabels labels.Labels dir string queues map[string]*QueueManager - metadataInWAL bool samplesIn *ewmaRate flushDeadline time.Duration interner *pool @@ -79,7 +77,7 @@ type WriteStorage struct { } // NewWriteStorage creates and runs a WriteStorage. 
-func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { +func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage { if logger == nil { logger = promslog.NewNopLogger() } @@ -95,7 +93,6 @@ func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, interner: newPool(), scraper: sm, quit: make(chan struct{}), - metadataInWAL: metadataInWal, highestTimestamp: &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, @@ -149,9 +146,6 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { newQueues := make(map[string]*QueueManager) newHashes := []string{} for _, rwConf := range conf.RemoteWriteConfigs { - if rwConf.ProtobufMessage == config.RemoteWriteProtoMsgV2 && !rws.metadataInWAL { - return errors.New("invalid remote write configuration, if you are using remote write version 2.0 the `--enable-feature=metadata-wal-records` feature flag must be enabled") - } hash, err := toHash(rwConf) if err != nil { return err diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 8125da7f6e..a3b30b6425 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -117,7 +117,7 @@ func TestWriteStorageApplyConfig_NoDuplicateWriteConfigs(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) + s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: tc.cfgs, @@ -143,7 +143,7 @@ func TestWriteStorageApplyConfig_RestartOnNameChange(t *testing.T) { hash, err := toHash(cfg) require.NoError(t, err) - s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) + s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil) conf := 
&config.Config{ GlobalConfig: config.DefaultGlobalConfig, @@ -165,7 +165,7 @@ func TestWriteStorageApplyConfig_RestartOnNameChange(t *testing.T) { func TestWriteStorageApplyConfig_UpdateWithRegisterer(t *testing.T) { dir := t.TempDir() - s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false) + s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil) c1 := &config.RemoteWriteConfig{ Name: "named", URL: &common_config.URL{ @@ -206,7 +206,7 @@ func TestWriteStorageApplyConfig_UpdateWithRegisterer(t *testing.T) { func TestWriteStorageApplyConfig_Lifecycle(t *testing.T) { dir := t.TempDir() - s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{ @@ -222,7 +222,7 @@ func TestWriteStorageApplyConfig_Lifecycle(t *testing.T) { func TestWriteStorageApplyConfig_UpdateExternalLabels(t *testing.T) { dir := t.TempDir() - s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false) + s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil) externalLabels := labels.FromStrings("external", "true") conf := &config.Config{ @@ -250,7 +250,7 @@ func TestWriteStorageApplyConfig_UpdateExternalLabels(t *testing.T) { func TestWriteStorageApplyConfig_Idempotent(t *testing.T) { dir := t.TempDir() - s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil) conf := &config.Config{ GlobalConfig: config.GlobalConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{ @@ -274,7 +274,7 @@ func TestWriteStorageApplyConfig_Idempotent(t *testing.T) { func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) { dir := t.TempDir() - s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) + s := NewWriteStorage(nil, nil, dir, 
defaultFlushDeadline, nil) c0 := &config.RemoteWriteConfig{ RemoteTimeout: model.Duration(10 * time.Second), diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 0238a8e140..db98e87408 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -91,7 +91,7 @@ func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) * t.Helper() dbDir := t.TempDir() - rs := remote.NewStorage(promslog.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) + rs := remote.NewStorage(promslog.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil) t.Cleanup(func() { require.NoError(t, rs.Close()) }) @@ -737,7 +737,7 @@ func TestLockfile(t *testing.T) { tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { logger := promslog.NewNopLogger() reg := prometheus.NewRegistry() - rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false) + rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil) t.Cleanup(func() { require.NoError(t, rs.Close()) }) @@ -757,7 +757,7 @@ func TestLockfile(t *testing.T) { func Test_ExistingWAL_NextRef(t *testing.T) { dbDir := t.TempDir() - rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) + rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil) defer func() { require.NoError(t, rs.Close()) }() diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index f9bdbe3947..fcf9c2a243 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -495,7 +495,7 @@ func TestEndpoints(t *testing.T) { remote := remote.NewStorage(promslog.New(&promslogConfig), prometheus.DefaultRegisterer, func() (int64, error) { return 0, nil - }, dbDir, 1*time.Second, nil, false) + }, dbDir, 1*time.Second, nil) err = remote.ApplyConfig(&config.Config{ RemoteReadConfigs: []*config.RemoteReadConfig{ From 
a5ffa83be83be22e2ec9fd1d4765299d8d16119e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Laimis=20Juzeli=C5=ABnas?= <58551069+laimis9133@users.noreply.github.com> Date: Thu, 13 Feb 2025 15:25:50 +0200 Subject: [PATCH 1375/1397] scrape: reorder the switch case of errors (#15991) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change case order for switch scrapeLoop This commit changes the ordering in error identification switch cases for better production performance and adds reasonings behind error switch case order as comments. --------- Signed-off-by: Laimis Juzeliūnas --- scrape/scrape.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/scrape/scrape.go b/scrape/scrape.go index 58314af4eb..020bddb757 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1962,12 +1962,24 @@ func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) boo // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample or bucket limit errors. +// Switch error cases for Sample and Bucket limits are checked first since they're more common +// during normal operation (e.g., accidental cardinality explosion, sudden traffic spikes). +// Current case ordering prevents exercising other cases when limits are exceeded. +// Remaining error cases typically occur only a few times, often during initial setup. func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { switch { case err == nil: return true, nil - case errors.Is(err, storage.ErrNotFound): - return false, storage.ErrNotFound + case errors.Is(err, errSampleLimit): + // Keep on parsing output if we hit the limit, so we report the correct + // total number of samples scraped. 
+ *sampleLimitErr = err + return false, nil + case errors.Is(err, errBucketLimit): + // Keep on parsing output if we hit the limit, so we report the bucket + // total number of samples scraped. + *bucketLimitErr = err + return false, nil case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ sl.l.Debug("Out of order sample", "series", string(met)) @@ -1983,16 +1995,8 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke sl.l.Debug("Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil - case errors.Is(err, errSampleLimit): - // Keep on parsing output if we hit the limit, so we report the correct - // total number of samples scraped. - *sampleLimitErr = err - return false, nil - case errors.Is(err, errBucketLimit): - // Keep on parsing output if we hit the limit, so we report the correct - // total number of samples scraped. - *bucketLimitErr = err - return false, nil + case errors.Is(err, storage.ErrNotFound): + return false, storage.ErrNotFound default: return false, err } From ef24d47df9988baa4fb899c525b3cfd1c13e9c04 Mon Sep 17 00:00:00 2001 From: Robin Candau Date: Thu, 13 Feb 2025 18:22:39 +0100 Subject: [PATCH 1376/1397] build: Ignore timestamps recording in gzip metadata Use the `-n / --noname` option to ignore non-deterministric information, such as timestamps, in gzip metadata. This is required for [reproducible builds](https://reproducible-builds.org/). Signed-off-by: Robin Candau --- scripts/compress_assets.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/compress_assets.sh b/scripts/compress_assets.sh index 19e1e22486..91b1c7b31a 100755 --- a/scripts/compress_assets.sh +++ b/scripts/compress_assets.sh @@ -13,9 +13,9 @@ fi cd web/ui cp embed.go.tmpl embed.go -GZIP_OPTS="-fk" +GZIP_OPTS="-fkn" # gzip option '-k' may not always exist in the latest gzip available on different distros. -if ! 
gzip -k -h &>/dev/null; then GZIP_OPTS="-f"; fi +if ! gzip -k -h &>/dev/null; then GZIP_OPTS="-fn"; fi mkdir -p static find static -type f -name '*.gz' -delete From 77a56981909c3478e423127fffb2e10008eeddbf Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Mon, 17 Feb 2025 06:19:16 -0500 Subject: [PATCH 1377/1397] Ruler Concurrency: Consider `__name__` matchers (#16039) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When calculating dependencies between rules, we sometimes run into `{__name__...}` matchers These can be used the same way as the actual rule names This will enable even more rules to run concurrently The new logic is also not slower: ``` julienduchesne@triceratops prometheus % benchstat test-old.txt test.txt goos: darwin goarch: arm64 pkg: github.com/prometheus/prometheus/rules cpu: Apple M3 Pro │ test-old.txt │ test.txt │ │ sec/op │ sec/op vs base │ DependencyMap-11 1.206µ ± 7% 1.024µ ± 7% -15.10% (p=0.000 n=10) │ test-old.txt │ test.txt │ │ B/op │ B/op vs base │ DependencyMap-11 1.720Ki ± 0% 1.438Ki ± 0% -16.35% (p=0.000 n=10) │ test-old.txt │ test.txt │ │ allocs/op │ allocs/op vs base │ DependencyMap-11 39.00 ± 0% 34.00 ± 0% -12.82% (p=0.000 n=10) ``` Signed-off-by: Julien Duchesne --- rules/group.go | 43 ++++++++++++++++++++++++++----------------- rules/manager_test.go | 14 +++++++++++--- 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/rules/group.go b/rules/group.go index 9ad9aab093..a83c33a8e8 100644 --- a/rules/group.go +++ b/rules/group.go @@ -1110,9 +1110,6 @@ func buildDependencyMap(rules []Rule) dependencyMap { return dependencies } - inputs := make(map[string][]Rule, len(rules)) - outputs := make(map[string][]Rule, len(rules)) - var indeterminate bool for _, rule := range rules { @@ -1120,26 +1117,46 @@ func buildDependencyMap(rules []Rule) dependencyMap { break } - name := rule.Name() - outputs[name] = append(outputs[name], rule) - parser.Inspect(rule.Query(), func(node 
parser.Node, path []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { + // Find the name matcher for the rule. + var nameMatcher *labels.Matcher + if n.Name != "" { + nameMatcher = labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, n.Name) + } else { + for _, m := range n.LabelMatchers { + if m.Name == model.MetricNameLabel { + nameMatcher = m + break + } + } + } + // A wildcard metric expression means we cannot reliably determine if this rule depends on any other, // which means we cannot safely run any rules concurrently. - if n.Name == "" && len(n.LabelMatchers) > 0 { + if nameMatcher == nil { indeterminate = true return nil } // Rules which depend on "meta-metrics" like ALERTS and ALERTS_FOR_STATE will have undefined behaviour // if they run concurrently. - if n.Name == alertMetricName || n.Name == alertForStateMetricName { + if nameMatcher.Matches(alertMetricName) || nameMatcher.Matches(alertForStateMetricName) { indeterminate = true return nil } - inputs[n.Name] = append(inputs[n.Name], rule) + // Find rules which depend on the output of this rule. + for _, other := range rules { + if other == rule { + continue + } + + otherName := other.Name() + if nameMatcher.Matches(otherName) { + dependencies[other] = append(dependencies[other], rule) + } + } } return nil }) @@ -1149,13 +1166,5 @@ func buildDependencyMap(rules []Rule) dependencyMap { return nil } - for output, outRules := range outputs { - for _, outRule := range outRules { - if inRules, found := inputs[output]; found && len(inRules) > 0 { - dependencies[outRule] = append(dependencies[outRule], inRules...) 
- } - } - } - return dependencies } diff --git a/rules/manager_test.go b/rules/manager_test.go index 6c32b6d0f1..9ef1fcf074 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -1601,10 +1601,14 @@ func TestDependencyMap(t *testing.T) { require.NoError(t, err) rule4 := NewRecordingRule("user:requests:increase1h", expr, labels.Labels{}) + expr, err = parser.ParseExpr(`sum by (user) ({__name__=~"user:requests.+5m"})`) + require.NoError(t, err) + rule5 := NewRecordingRule("user:requests:sum5m", expr, labels.Labels{}) + group := NewGroup(GroupOptions{ Name: "rule_group", Interval: time.Second, - Rules: []Rule{rule, rule2, rule3, rule4}, + Rules: []Rule{rule, rule2, rule3, rule4, rule5}, Opts: opts, }) @@ -1619,13 +1623,17 @@ func TestDependencyMap(t *testing.T) { require.Equal(t, []Rule{rule}, depMap.dependencies(rule2)) require.False(t, depMap.isIndependent(rule2)) - require.Zero(t, depMap.dependents(rule3)) + require.Equal(t, []Rule{rule5}, depMap.dependents(rule3)) require.Zero(t, depMap.dependencies(rule3)) - require.True(t, depMap.isIndependent(rule3)) + require.False(t, depMap.isIndependent(rule3)) require.Zero(t, depMap.dependents(rule4)) require.Equal(t, []Rule{rule}, depMap.dependencies(rule4)) require.False(t, depMap.isIndependent(rule4)) + + require.Zero(t, depMap.dependents(rule5)) + require.Equal(t, []Rule{rule3}, depMap.dependencies(rule5)) + require.False(t, depMap.isIndependent(rule5)) } func TestNoDependency(t *testing.T) { From bf7ec404b2a4933a81b0ffe2baba32b92d471655 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Thu, 6 Feb 2025 13:39:57 +0100 Subject: [PATCH 1378/1397] Prepare release 3.2.0 Signed-off-by: Jan Fajerski --- CHANGELOG.md | 4 +++- VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- 7 files changed, 17 insertions(+), 15 deletions(-) diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 4334a17137..b5cb893bab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,8 @@ ## unreleased -## 3.2.0-rc.1 / 2025-01-29 + +## 3.2.0 / 2025-02-17 * [CHANGE] relabel: Replace actions can now use UTF-8 characters in `targetLabel` field. Note that `$` or `${}` will be expanded. This also apply to `replacement` field for `LabelMap` action. #15851 * [CHANGE] rulefmt: Rule names can use UTF-8 characters, except `{` and `}` characters (due to common mistake checks). #15851 @@ -14,6 +15,7 @@ * [ENHANCEMENT] ui: Make "hide empty rules" and hide empty rules" persistent #15807 * [ENHANCEMENT] web/api: Add a limit parameter to `/query` and `/query_range`. #15552 * [ENHANCEMENT] api: Add fields Node and ServerTime to `/status`. #15784 +* [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261 * [BUGFIX] remotewrite2: Fix invalid metadata bug for metrics without metadata. #15829 * [BUGFIX] remotewrite2: Fix the unit field propagation. #15825 * [BUGFIX] scrape: Fix WAL metadata for histograms and summaries. 
#15832 diff --git a/VERSION b/VERSION index f338b6234b..944880fa15 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.2.0-rc.1 +3.2.0 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 1bc742de81..636f827c8a 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.302.0-rc.1", + "version": "0.302.0", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.302.0-rc.1", + "@prometheus-io/codemirror-promql": "0.302.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index a0f9f823c1..457f644905 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.302.0-rc.1", + "version": "0.302.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.302.0-rc.1", + "@prometheus-io/lezer-promql": "0.302.0", "lru-cache": "^11.0.2" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index f41f56652b..a358151da1 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.302.0-rc.1", + "version": "0.302.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": 
"module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c27ca7997e..1fffce00e7 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.302.0-rc.1", + "version": "0.302.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.302.0-rc.1", + "version": "0.302.0", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.301.0", + "version": "0.302.0", "dependencies": { "@codemirror/autocomplete": "^6.18.4", "@codemirror/language": "^6.10.8", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.301.0", + "@prometheus-io/codemirror-promql": "0.302.0", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", @@ -146,10 +146,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.301.0", + "version": "0.302.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.301.0", + "@prometheus-io/lezer-promql": "0.302.0", "lru-cache": "^11.0.2" }, "devDependencies": { @@ -179,7 +179,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.301.0", + "version": "0.302.0", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.2", diff --git a/web/ui/package.json b/web/ui/package.json index fb26970ccf..4af8447049 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.302.0-rc.1", + "version": "0.302.0", "private": true, "scripts": { "build": "bash build_ui.sh --all", From 86bb04783c8fd0321e76ccdfbf3ad3faf29d4e65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EA=B9=80=EA=B0=80=EC=9C=A4?= 
<60080153+KaYunKIM@users.noreply.github.com> Date: Thu, 20 Feb 2025 00:24:30 +0900 Subject: [PATCH 1379/1397] Example config: Add labels to static_configs (#13327) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 김가윤 <60080153+KaYunKIM@users.noreply.github.com> Co-authored-by: Bryan Boreham --- documentation/examples/prometheus.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/documentation/examples/prometheus.yml b/documentation/examples/prometheus.yml index 312b578aad..9a45049c62 100644 --- a/documentation/examples/prometheus.yml +++ b/documentation/examples/prometheus.yml @@ -27,3 +27,6 @@ scrape_configs: static_configs: - targets: ["localhost:9090"] + # The label name is added as a label `label_name=` to any timeseries scraped from this config. + labels: + app: "prometheus" From c7d4b53ec18156f990bae7ae6b56a75c74b2d00d Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Mon, 10 Feb 2025 08:06:58 +0100 Subject: [PATCH 1380/1397] chore: enable unused-parameter from revive Signed-off-by: Matthieu MOREL --- .golangci.yml | 1 - cmd/prometheus/main.go | 44 +++---- cmd/prometheus/scrape_failure_log_test.go | 2 +- cmd/promtool/backfill_test.go | 2 +- cmd/promtool/main.go | 4 +- cmd/promtool/rules_test.go | 2 +- cmd/promtool/sd.go | 2 +- cmd/promtool/unittest.go | 2 +- discovery/aws/ec2.go | 4 +- discovery/aws/ec2_test.go | 4 +- discovery/aws/lightsail.go | 2 +- discovery/azure/azure_test.go | 10 +- discovery/consul/consul_test.go | 4 +- discovery/consul/metrics.go | 2 +- discovery/digitalocean/digitalocean.go | 2 +- discovery/dns/dns_test.go | 14 +-- discovery/eureka/client_test.go | 4 +- discovery/eureka/eureka.go | 2 +- discovery/eureka/eureka_test.go | 6 +- discovery/file/metrics.go | 2 +- discovery/gce/gce.go | 2 +- discovery/hetzner/hetzner.go | 2 +- discovery/http/http_test.go | 6 +- discovery/ionos/ionos.go | 2 +- discovery/kubernetes/kubernetes_test.go | 2 +- discovery/kubernetes/metrics.go | 2 +- 
discovery/marathon/marathon.go | 2 +- discovery/marathon/marathon_test.go | 2 +- discovery/moby/dockerswarm.go | 2 +- discovery/nomad/nomad_test.go | 4 +- discovery/openstack/hypervisor.go | 2 +- discovery/openstack/instance.go | 2 +- discovery/openstack/mock_test.go | 4 +- discovery/openstack/openstack.go | 2 +- discovery/ovhcloud/ovhcloud.go | 2 +- discovery/puppetdb/puppetdb.go | 2 +- discovery/puppetdb/puppetdb_test.go | 4 +- discovery/refresh/refresh_test.go | 2 +- discovery/scaleway/scaleway.go | 2 +- discovery/triton/triton.go | 2 +- discovery/triton/triton_test.go | 2 +- discovery/uyuni/uyuni.go | 2 +- discovery/uyuni/uyuni_test.go | 6 +- discovery/vultr/vultr.go | 2 +- discovery/xds/client_test.go | 4 +- discovery/xds/metrics.go | 2 +- discovery/xds/xds_test.go | 10 +- discovery/zookeeper/zookeeper.go | 4 +- discovery/zookeeper/zookeeper_test.go | 2 +- .../influxdb/client_test.go | 2 +- model/labels/regexp.go | 4 +- model/labels/regexp_test.go | 2 +- model/textparse/nhcbparse_test.go | 4 +- notifier/notifier_test.go | 8 +- promql/engine.go | 8 +- promql/engine_test.go | 4 +- promql/functions.go | 112 ++++++++--------- promql/histogram_stats_iterator_test.go | 2 +- promql/info.go | 2 +- rules/alerting_test.go | 10 +- rules/group.go | 2 +- rules/manager.go | 6 +- rules/manager_test.go | 28 ++--- rules/recording_test.go | 2 +- scrape/helpers_test.go | 10 +- scrape/manager_test.go | 6 +- scrape/scrape.go | 4 +- scrape/scrape_test.go | 114 +++++++++--------- scrape/target_test.go | 14 +-- storage/merge.go | 2 +- storage/merge_test.go | 6 +- storage/remote/client_test.go | 12 +- storage/remote/queue_manager_test.go | 4 +- storage/remote/write_handler.go | 2 +- storage/remote/write_handler_test.go | 6 +- storage/series.go | 2 +- tsdb/block.go | 2 +- tsdb/block_test.go | 2 +- tsdb/chunks/chunk_write_queue_test.go | 10 +- tsdb/chunks/head_chunks_test.go | 4 +- tsdb/db_test.go | 24 ++-- tsdb/fileutil/dir.go | 2 +- tsdb/fileutil/fileutil.go | 2 +- tsdb/head_other.go 
| 2 +- tsdb/head_read.go | 2 +- tsdb/head_test.go | 6 +- tsdb/index/index.go | 2 +- tsdb/isolation_test.go | 2 +- tsdb/ooo_head_read.go | 8 +- tsdb/querier.go | 4 +- tsdb/querier_test.go | 14 +-- tsdb/record/record.go | 2 +- tsdb/wlog/watcher_test.go | 6 +- util/httputil/compression_test.go | 2 +- web/api/v1/api_test.go | 10 +- web/api/v1/errors_test.go | 8 +- web/web.go | 10 +- 97 files changed, 350 insertions(+), 351 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 56509fe13f..2dfe8e5f0c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -155,7 +155,6 @@ linters-settings: - name: unexported-return - name: unreachable-code - name: unused-parameter - disabled: true - name: var-declaration - name: var-naming testifylint: diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index d69648d88b..4559d51837 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -154,7 +154,7 @@ func init() { // serverOnlyFlag creates server-only kingpin flag. func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)). - PreAction(func(parseContext *kingpin.ParseContext) error { + PreAction(func(_ *kingpin.ParseContext) error { // This will be invoked only if flag is actually provided by user. serverOnlyFlags = append(serverOnlyFlags, "--"+name) return nil @@ -164,7 +164,7 @@ func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCl // agentOnlyFlag creates agent-only kingpin flag. func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)). - PreAction(func(parseContext *kingpin.ParseContext) error { + PreAction(func(_ *kingpin.ParseContext) error { // This will be invoked only if flag is actually provided by user. 
agentOnlyFlags = append(agentOnlyFlags, "--"+name) return nil @@ -526,7 +526,7 @@ func main() { promslogflag.AddFlags(a, &cfg.promslogConfig) - a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { + a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(_ *kingpin.ParseContext) error { if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil { os.Exit(1) return err @@ -1021,7 +1021,7 @@ func main() { } return nil }, - func(err error) { + func(_ error) { close(cancel) webHandler.SetReady(web.Stopping) notifs.AddNotification(notifications.ShuttingDown) @@ -1036,7 +1036,7 @@ func main() { logger.Info("Scrape discovery manager stopped") return err }, - func(err error) { + func(_ error) { logger.Info("Stopping scrape discovery manager...") cancelScrape() }, @@ -1050,7 +1050,7 @@ func main() { logger.Info("Notify discovery manager stopped") return err }, - func(err error) { + func(_ error) { logger.Info("Stopping notify discovery manager...") cancelNotify() }, @@ -1064,7 +1064,7 @@ func main() { ruleManager.Run() return nil }, - func(err error) { + func(_ error) { ruleManager.Stop() }, ) @@ -1083,7 +1083,7 @@ func main() { logger.Info("Scrape manager stopped") return err }, - func(err error) { + func(_ error) { // Scrape manager needs to be stopped before closing the local TSDB // so that it doesn't try to write samples to a closed storage. // We should also wait for rule manager to be fully stopped to ensure @@ -1101,7 +1101,7 @@ func main() { tracingManager.Run() return nil }, - func(err error) { + func(_ error) { tracingManager.Stop() }, ) @@ -1182,7 +1182,7 @@ func main() { } } }, - func(err error) { + func(_ error) { // Wait for any in-progress reloads to complete to avoid // reloading things after they have been shutdown. 
cancel <- struct{}{} @@ -1214,7 +1214,7 @@ func main() { <-cancel return nil }, - func(err error) { + func(_ error) { close(cancel) }, ) @@ -1267,7 +1267,7 @@ func main() { <-cancel return nil }, - func(err error) { + func(_ error) { if err := fanoutStorage.Close(); err != nil { logger.Error("Error stopping storage", "err", err) } @@ -1322,7 +1322,7 @@ func main() { <-cancel return nil }, - func(e error) { + func(_ error) { if err := fanoutStorage.Close(); err != nil { logger.Error("Error stopping storage", "err", err) } @@ -1339,7 +1339,7 @@ func main() { } return nil }, - func(err error) { + func(_ error) { cancelWeb() }, ) @@ -1361,7 +1361,7 @@ func main() { logger.Info("Notifier manager stopped") return nil }, - func(err error) { + func(_ error) { notifierManager.Stop() }, ) @@ -1638,29 +1638,29 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender { type notReadyAppender struct{} // SetOptions does nothing in this appender implementation. -func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {} +func (n notReadyAppender) SetOptions(_ *storage.AppendOptions) {} -func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { +func (n notReadyAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) 
(storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { +func (n notReadyAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } -func (n notReadyAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } diff --git a/cmd/prometheus/scrape_failure_log_test.go b/cmd/prometheus/scrape_failure_log_test.go index 8d86d719f9..e04b1aaa2d 100644 --- a/cmd/prometheus/scrape_failure_log_test.go +++ b/cmd/prometheus/scrape_failure_log_test.go @@ -171,7 +171,7 @@ func reloadPrometheus(t *testing.T, port int) { // startGarbageServer sets up a mock server that returns a 500 Internal Server Error // for all requests. It also increments the request count each time it's hit. 
func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { requestCount.Inc() w.WriteHeader(http.StatusInternalServerError) })) diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index da64b5dca1..8a599510a9 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -45,7 +45,7 @@ func sortSamples(samples []backfillSample) { }) } -func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { +func queryAllSeries(t testing.TB, q storage.Querier, _, _ int64) []backfillSample { ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) samples := []backfillSample{} for ss.Next() { diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 00280500ed..81fdd8f4ff 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -1324,7 +1324,7 @@ func labelsSetPromQL(query, labelMatchType, name, value string) error { return fmt.Errorf("invalid label match type: %s", labelMatchType) } - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { var found bool for i, l := range n.LabelMatchers { @@ -1355,7 +1355,7 @@ func labelsDeletePromQL(query, name string) error { return err } - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { for i, l := range n.LabelMatchers { if l.Name == name { diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index e22b8a556b..3cb47aa8af 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -35,7 
+35,7 @@ type mockQueryRangeAPI struct { samples model.Matrix } -func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { +func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, _ string, _ v1.Range, _ ...v1.Option) (model.Value, v1.Warnings, error) { return mockAPI.samples, v1.Warnings{}, nil } diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index 8863fbeac0..884864205c 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -38,7 +38,7 @@ type sdCheckResult struct { } // CheckSD performs service discovery for the given job name and reports the results. -func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int { +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, _ prometheus.Registerer) int { logger := promslog.New(&promslog.Config{}) cfg, err := config.LoadFile(sdConfigFiles, false, logger) diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 7f494e27aa..06b0e28c51 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -224,7 +224,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde QueryFunc: rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), Appendable: suite.Storage(), Context: context.Background(), - NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, + NotifyFunc: func(_ context.Context, _ string, _ ...*rules.Alert) {}, Logger: promslog.NewNopLogger(), } m := rules.NewManager(opts) diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 0f35c401e6..7e35a1807f 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -101,7 +101,7 @@ type EC2SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*EC2SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ec2Metrics{ refreshMetrics: rmi, } @@ -262,7 +262,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error } input := &ec2.DescribeInstancesInput{Filters: filters} - if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool { + if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, _ bool) bool { for _, r := range p.Reservations { for _, inst := range r.Instances { if inst.PrivateIpAddress == nil { diff --git a/discovery/aws/ec2_test.go b/discovery/aws/ec2_test.go index f34065c23e..888816b4e2 100644 --- a/discovery/aws/ec2_test.go +++ b/discovery/aws/ec2_test.go @@ -399,7 +399,7 @@ func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client { return &client } -func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) { +func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(_ aws.Context, _ *ec2.DescribeAvailabilityZonesInput, _ ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) { if len(m.ec2Data.azToAZID) == 0 { return nil, errors.New("No AZs found") } @@ -420,7 +420,7 @@ func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, in }, nil } -func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error { +func (m *mockEC2Client) DescribeInstancesPagesWithContext(_ aws.Context, _ *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, 
bool) bool, _ ...request.Option) error { r := ec2.Reservation{} r.SetInstances(m.ec2Data.instances) r.SetOwnerId(m.ec2Data.ownerID) diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index b892867f1b..fb249b8256 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -83,7 +83,7 @@ type LightsailSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*LightsailSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &lightsailMetrics{ refreshMetrics: rmi, } diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index b905e9fcef..d7141561e2 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -723,11 +723,11 @@ func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClie func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer { return fakenetwork.InterfacesServer{ - Get: func(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) { + Get: func(_ context.Context, _, _ string, _ *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) { resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil) return }, - GetVirtualMachineScaleSetNetworkInterface: func(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp 
azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) { + GetVirtualMachineScaleSetNetworkInterface: func(_ context.Context, _, _, _, _ string, _ *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) { resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil) return }, @@ -736,7 +736,7 @@ func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork. func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer { return fake.VirtualMachinesServer{ - NewListAllPager: func(options *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) { + NewListAllPager: func(_ *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) { for _, page := range vmResp { resp.AddPage(http.StatusOK, page, nil) } @@ -747,7 +747,7 @@ func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllRespons func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer { return fake.VirtualMachineScaleSetsServer{ - NewListAllPager: func(options *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) { + NewListAllPager: func(_ *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) { for _, page := range vmssResp { resp.AddPage(http.StatusOK, page, nil) } @@ -758,7 +758,7 @@ func defaultMockVMSSServer(vmssResp 
[]armcompute.VirtualMachineScaleSetsClientLi func defaultMockVMSSVMServer(vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer { return fake.VirtualMachineScaleSetVMsServer{ - NewListPager: func(resourceGroupName, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) { + NewListPager: func(_, _ string, _ *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) { for _, page := range vmssvmResp { resp.AddPage(http.StatusOK, page, nil) } diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index cdbb80baba..ba3f63ccb5 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -399,14 +399,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { }{ { // Define a handler that will return status 500. - handler: func(w http.ResponseWriter, r *http.Request) { + handler: func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) }, errMessage: "Unexpected response code: 500 ()", }, { // Define a handler that will return incorrect response. 
- handler: func(w http.ResponseWriter, r *http.Request) { + handler: func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte(`{"Config": {"Not-Datacenter": "test-dc"}}`)) }, errMessage: "invalid value '' for Config.Datacenter", diff --git a/discovery/consul/metrics.go b/discovery/consul/metrics.go index 8266e7cc60..b49509bd8f 100644 --- a/discovery/consul/metrics.go +++ b/discovery/consul/metrics.go @@ -31,7 +31,7 @@ type consulMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &consulMetrics{ rpcFailuresCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index eeaedd8869..d0ececd9e9 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -65,7 +65,7 @@ func init() { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &digitaloceanMetrics{ refreshMetrics: rmi, } diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index f01a075c45..ea46ad3237 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -52,7 +52,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return nil, errors.New("some error") }, expected: []*targetgroup.Group{}, @@ -65,7 +65,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "A", }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.A{A: net.IPv4(192, 0, 2, 2)}, @@ -97,7 +97,7 @@ func TestDNS(t *testing.T) { Port: 80, Type: "AAAA", }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.AAAA{AAAA: net.IPv6loopback}, @@ -128,7 +128,7 @@ func TestDNS(t *testing.T) { Type: "SRV", RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -167,7 +167,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, 
_ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.SRV{Port: 3306, Target: "db1.example.com."}, @@ -198,7 +198,7 @@ func TestDNS(t *testing.T) { Names: []string{"_mysql._tcp.db.example.com."}, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{}, nil }, expected: []*targetgroup.Group{ @@ -215,7 +215,7 @@ func TestDNS(t *testing.T) { Port: 25, RefreshInterval: model.Duration(time.Minute), }, - lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) { + lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) { return &dns.Msg{ Answer: []dns.RR{ &dns.MX{Preference: 0, Mx: "smtp1.example.com."}, diff --git a/discovery/eureka/client_test.go b/discovery/eureka/client_test.go index 83f6fd5ff1..f85409a11e 100644 --- a/discovery/eureka/client_test.go +++ b/discovery/eureka/client_test.go @@ -172,7 +172,7 @@ func TestFetchApps(t *testing.T) { ` // Simulate apps with a valid XML response. - respHandler := func(w http.ResponseWriter, r *http.Request) { + respHandler := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, appsXML) @@ -199,7 +199,7 @@ func TestFetchApps(t *testing.T) { func Test500ErrorHttpResponse(t *testing.T) { // Simulate 500 error. - respHandler := func(w http.ResponseWriter, r *http.Request) { + respHandler := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 3cac667f85..459b608e96 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -77,7 +77,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &eurekaMetrics{ refreshMetrics: rmi, } diff --git a/discovery/eureka/eureka_test.go b/discovery/eureka/eureka_test.go index b499410bfc..5ea9a6c74e 100644 --- a/discovery/eureka/eureka_test.go +++ b/discovery/eureka/eureka_test.go @@ -58,7 +58,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err func TestEurekaSDHandleError(t *testing.T) { var ( errTesting = "non 2xx status '500' response during eureka service discovery" - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) @@ -76,7 +76,7 @@ func TestEurekaSDEmptyList(t *testing.T) { 1 ` - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, appsXML) @@ -235,7 +235,7 @@ func TestEurekaSDSendGroup(t *testing.T) { ` - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, appsXML) diff --git a/discovery/file/metrics.go b/discovery/file/metrics.go index c01501e4ef..3e3df7bbf6 100644 --- a/discovery/file/metrics.go +++ b/discovery/file/metrics.go @@ -30,7 +30,7 @@ type fileMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg 
prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { fm := &fileMetrics{ fileSDReadErrorsCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index 9a5b0e856e..32f1bb6722 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -83,7 +83,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &gceMetrics{ refreshMetrics: rmi, } diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 02e2272999..97d48f6d70 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -64,7 +64,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &hetznerMetrics{ refreshMetrics: rmi, } diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index 9d3a3fb5e7..3af9e4e504 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -75,7 +75,7 @@ func TestHTTPValidRefresh(t *testing.T) { } func TestHTTPInvalidCode(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) })) @@ -104,7 +104,7 @@ func TestHTTPInvalidCode(t *testing.T) { } func TestHTTPInvalidFormat(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "{}") })) @@ -212,7 +212,7 @@ func TestContentTypeRegex(t *testing.T) { func TestSourceDisappeared(t *testing.T) { var stubResponse string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json") fmt.Fprintln(w, stubResponse) })) diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index 1badda48cb..475e6c30eb 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -89,7 +89,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ionosMetrics{ refreshMetrics: rmi, } diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index e34bed899c..68d2d0ce35 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -312,7 +312,7 @@ func TestFailuresCountMetric(t *testing.T) { require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount)) // Simulate an error on watch requests. - c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) { + c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(_ kubetesting.Action) (bool, watch.Interface, error) { return true, nil, apierrors.NewUnauthorized("unauthorized") }) diff --git a/discovery/kubernetes/metrics.go b/discovery/kubernetes/metrics.go index fe419bc782..ba3cb1d32a 100644 --- a/discovery/kubernetes/metrics.go +++ b/discovery/kubernetes/metrics.go @@ -28,7 +28,7 @@ type kubernetesMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &kubernetesMetrics{ eventCount: prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 9c93e43f51..0c2c2e9702 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -80,7 +80,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &marathonMetrics{ refreshMetrics: rmi, } diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go index 659899f163..61d8ef900d 100644 --- a/discovery/marathon/marathon_test.go +++ b/discovery/marathon/marathon_test.go @@ -243,7 +243,7 @@ func TestMarathonZeroTaskPorts(t *testing.T) { func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) { // Simulate 500 error with a valid JSON response. - respHandler := func(w http.ResponseWriter, r *http.Request) { + respHandler := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/json") io.WriteString(w, `{}`) diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index ae12116301..57c0af7171 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -70,7 +70,7 @@ type Filter struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*DockerSwarmSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*DockerSwarmSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &dockerswarmMetrics{ refreshMetrics: rmi, } diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index c08f017496..a73b45785d 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -76,7 +76,7 @@ func (s *NomadSDTestSuite) SetupTest(t *testing.T) { } func (m *SDMock) HandleServicesList() { - m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) @@ -99,7 +99,7 @@ func (m *SDMock) HandleServicesList() { } func (m *SDMock) HandleServiceHashiCupsGet() { - m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("content-type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 5cea68c4a5..e7a6362052 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -77,7 +77,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details pagerHypervisors := hypervisors.List(client, nil) - err = pagerHypervisors.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { + err = pagerHypervisors.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { hypervisorList, err := 
hypervisors.ExtractHypervisors(page) if err != nil { return false, fmt.Errorf("could not extract hypervisors: %w", err) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index dea327afe3..6c2f79b3a4 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -119,7 +119,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{}) floatingIPList := make(map[floatingIPKey]string) floatingIPPresent := make(map[string]struct{}) - err = pagerFIP.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) { + err = pagerFIP.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { result, err := floatingips.ExtractFloatingIPs(page) if err != nil { return false, fmt.Errorf("could not extract floatingips: %w", err) diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go index 36620defeb..34e09c710f 100644 --- a/discovery/openstack/mock_test.go +++ b/discovery/openstack/mock_test.go @@ -62,7 +62,7 @@ func testHeader(t *testing.T, r *http.Request, header, expected string) { // HandleVersionsSuccessfully mocks version call. func (m *SDMock) HandleVersionsSuccessfully() { - m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintf(w, ` { "versions": { @@ -90,7 +90,7 @@ func (m *SDMock) HandleVersionsSuccessfully() { // HandleAuthSuccessfully mocks auth call. 
func (m *SDMock) HandleAuthSuccessfully() { - m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, _ *http.Request) { w.Header().Add("X-Subject-Token", tokenID) w.WriteHeader(http.StatusCreated) diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index eb1d4d5a4d..d7b58787a1 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -67,7 +67,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &openstackMetrics{ refreshMetrics: rmi, } diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index a75e9694fe..492bca603a 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -54,7 +54,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &ovhcloudMetrics{ refreshMetrics: rmi, } diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index b71842ff52..e249bc4afa 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -82,7 +82,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &puppetdbMetrics{ refreshMetrics: rmi, } diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index 4585b78223..57e198e131 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -184,7 +184,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { } func TestPuppetDBInvalidCode(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) })) @@ -212,7 +212,7 @@ func TestPuppetDBInvalidCode(t *testing.T) { } func TestPuppetDBInvalidFormat(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "{}") })) diff --git a/discovery/refresh/refresh_test.go b/discovery/refresh/refresh_test.go index 0d4460ffa6..7c57d0532a 100644 --- a/discovery/refresh/refresh_test.go +++ b/discovery/refresh/refresh_test.go @@ -56,7 +56,7 @@ func TestRefresh(t *testing.T) { } var i int - refresh := func(ctx context.Context) ([]*targetgroup.Group, error) { + refresh := func(_ context.Context) ([]*targetgroup.Group, error) { i++ switch i { case 1: diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index bc9282feaa..47ac092000 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -105,7 +105,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &scalewayMetrics{ refreshMetrics: rmi, } diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 5ec7b65215..5efe49e23d 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -71,7 +71,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &tritonMetrics{ refreshMetrics: rmi, } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index b2d06afaf6..b0dccbf898 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -230,7 +230,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet { var ( td, m, _ = newTritonDiscovery(c) - s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, dstr) })) ) diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index a37083575c..11b1888db4 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -113,7 +113,7 @@ type Discovery struct { } // NewDiscovererMetrics implements discovery.Config. 
-func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &uyuniMetrics{ refreshMetrics: rmi, } diff --git a/discovery/uyuni/uyuni_test.go b/discovery/uyuni/uyuni_test.go index 09be23e2b4..40d92fe79c 100644 --- a/discovery/uyuni/uyuni_test.go +++ b/discovery/uyuni/uyuni_test.go @@ -59,7 +59,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err func TestUyuniSDHandleError(t *testing.T) { var ( errTesting = "unable to login to Uyuni API: request error: bad status code - 500" - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) @@ -75,7 +75,7 @@ func TestUyuniSDLogin(t *testing.T) { var ( errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500" call = 0 - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/xml") switch call { case 0: @@ -106,7 +106,7 @@ func TestUyuniSDLogin(t *testing.T) { func TestUyuniSDSkipLogin(t *testing.T) { var ( errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500" - respHandler = func(w http.ResponseWriter, r *http.Request) { + respHandler = func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/xml") io.WriteString(w, ``) diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 3e9f68864a..24eebe2938 100644 --- a/discovery/vultr/vultr.go +++ 
b/discovery/vultr/vultr.go @@ -76,7 +76,7 @@ type SDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &vultrMetrics{ refreshMetrics: rmi, } diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index 2cf5b2f9cb..bf0e53b348 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -106,7 +106,7 @@ func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, } func TestHTTPResourceClientFetchEmptyResponse(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(_ *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { return nil, nil }) defer cleanup() @@ -146,7 +146,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) { } func TestHTTPResourceClientServerError(t *testing.T) { - client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { + client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(_ *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) { return nil, errors.New("server error") }) defer cleanup() diff --git a/discovery/xds/metrics.go b/discovery/xds/metrics.go index 597d516566..bdc9598f2c 100644 --- a/discovery/xds/metrics.go +++ b/discovery/xds/metrics.go @@ -29,7 +29,7 @@ type xdsMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) 
discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &xdsMetrics{ fetchFailuresCount: prometheus.NewCounter( prometheus.CounterOpts{ diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index db10adc1a2..af2784bcb2 100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -85,7 +85,7 @@ func createTestHTTPServer(t *testing.T, responder discoveryResponder) *httptest. } func constantResourceParser(targets []model.LabelSet, err error) resourceParser { - return func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error) { + return func(_ []*anypb.Any, _ string) ([]model.LabelSet, error) { return targets, err } } @@ -120,7 +120,7 @@ func (rc testResourceClient) Close() { func TestPollingRefreshSkipUpdate(t *testing.T) { rc := &testResourceClient{ - fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) { + fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) { return nil, nil }, } @@ -167,7 +167,7 @@ func TestPollingRefreshAttachesGroupMetadata(t *testing.T) { rc := &testResourceClient{ server: server, protocolVersion: ProtocolV3, - fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) { + fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) { return &v3.DiscoveryResponse{}, nil }, } @@ -223,14 +223,14 @@ func TestPollingDisappearingTargets(t *testing.T) { rc := &testResourceClient{ server: server, protocolVersion: ProtocolV3, - fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) { + fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) { return &v3.DiscoveryResponse{}, nil }, } // On the first poll, send back two targets. On the next, send just one. 
counter := 0 - parser := func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error) { + parser := func(_ []*anypb.Any, _ string) ([]model.LabelSet, error) { counter++ if counter == 1 { return []model.LabelSet{ diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index a1cfe3d055..af26cc5a0e 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -59,7 +59,7 @@ type ServersetSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*ServersetSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*ServersetSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } @@ -101,7 +101,7 @@ type NerveSDConfig struct { } // NewDiscovererMetrics implements discovery.Config. -func (*NerveSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func (*NerveSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { return &discovery.NoopDiscovererMetrics{} } diff --git a/discovery/zookeeper/zookeeper_test.go b/discovery/zookeeper/zookeeper_test.go index c2b41ce7a3..5f1b75c088 100644 --- a/discovery/zookeeper/zookeeper_test.go +++ b/discovery/zookeeper/zookeeper_test.go @@ -31,6 +31,6 @@ func TestNewDiscoveryError(t *testing.T) { []string{"unreachable.test"}, time.Second, []string{"/"}, nil, - func(data []byte, path string) (model.LabelSet, error) { return nil, nil }) + func(_ []byte, _ string) (model.LabelSet, error) { return nil, nil }) require.Error(t, err) } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go index 
3f756a9be4..f78d4db794 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go @@ -72,7 +72,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123 ` server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(_ http.ResponseWriter, r *http.Request) { require.Equal(t, http.MethodPost, r.Method, "Unexpected method.") require.Equal(t, "/api/v2/write", r.URL.Path, "Unexpected path.") b, err := io.ReadAll(r.Body) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index bfd9034059..cf6c9158e9 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -991,7 +991,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str return true } - analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool { // Ensure we don't have mixed case sensitivity. 
if caseSensitiveSet && caseSensitive != prefixCaseSensitive { return false @@ -1026,7 +1026,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { multiMatcher.add(matcher.s) return true - }, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + }, func(prefix string, _ bool, matcher StringMatcher) bool { multiMatcher.addPrefix(prefix, caseSensitive, matcher) return true }) diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index e9745502ff..bb6fce3104 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -987,7 +987,7 @@ func TestFindEqualOrPrefixStringMatchers(t *testing.T) { ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { matches = append(matches, match{matcher.s, matcher.caseSensitive}) return true - }, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool { + }, func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool { matches = append(matches, match{prefix, prefixCaseSensitive}) return true }) diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go index f75c7f5677..916196c24b 100644 --- a/model/textparse/nhcbparse_test.go +++ b/model/textparse/nhcbparse_test.go @@ -604,14 +604,14 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) { return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true} }, func() (string, parserFactory, []int, parserOptions) { - factory := func(keepClassic bool) Parser { + factory := func(_ bool) Parser { input := createTestOpenMetricsHistogram() return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) } return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true} }, func() (string, parserFactory, []int, parserOptions) { - factory := func(keepClassic bool) 
Parser { + factory := func(_ bool) Parser { input := createTestPromHistogram() return NewPromParser([]byte(input), labels.NewSymbolTable()) } diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 97b0274f29..9bc6f3180f 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -348,7 +348,7 @@ func TestCustomDo(t *testing.T) { var received bool h := NewManager(&Options{ - Do: func(_ context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + Do: func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) { received = true body, err := io.ReadAll(req.Body) @@ -447,7 +447,7 @@ func TestHandlerQueuing(t *testing.T) { errc = make(chan error, 1) ) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { // Notify the test function that we have received something. select { case called <- struct{}{}: @@ -724,7 +724,7 @@ func TestHangingNotifier(t *testing.T) { // Set up a faulty Alertmanager. var faultyCalled atomic.Bool - faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + faultyServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { faultyCalled.Store(true) select { case <-done: @@ -736,7 +736,7 @@ func TestHangingNotifier(t *testing.T) { // Set up a functional Alertmanager. 
var functionalCalled atomic.Bool - functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + functionalServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { functionalCalled.Store(true) })) functionalURL, err := url.Parse(functionalServer.URL) diff --git a/promql/engine.go b/promql/engine.go index f1904ec4ba..cf5a269882 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -558,7 +558,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { var atModifierUsed, negativeOffsetUsed bool var validationErr error - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END { @@ -1969,7 +1969,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, case *parser.NumberLiteral: span.SetAttributes(attribute.Float64("value", e.Val)) - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) @@ -2138,7 +2138,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } - return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to 
ensure that // we return a point for each time step in this case. @@ -3789,7 +3789,7 @@ func NewHashRatioSampler() *HashRatioSampler { return &HashRatioSampler{} } -func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 { +func (s *HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 { const ( float64MaxUint64 = float64(math.MaxUint64) ) diff --git a/promql/engine_test.go b/promql/engine_test.go index 5c4bb744f5..5dddebe5df 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -263,7 +263,7 @@ func TestQueryError(t *testing.T) { } engine := promqltest.NewTestEngineWithOpts(t, opts) errStorage := promql.ErrStorage{errors.New("storage error")} - queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + queryable := storage.QueryableFunc(func(_, _ int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil }) ctx, cancelCtx := context.WithCancel(context.Background()) @@ -2290,7 +2290,7 @@ func TestQueryLogger_error(t *testing.T) { ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() testErr := errors.New("failure") - query := engine.NewTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(_ context.Context) error { return testErr }) diff --git a/promql/functions.go b/promql/functions.go index 1cb8b2af2d..ab6b598cfc 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -59,7 +59,7 @@ import ( type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) // === time() float64 === -func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{ F: float64(enh.Ts) / 1000, }}, nil @@ -498,7 +498,7 @@ func filterFloats(v Vector) Vector { } // === sort(node 
parser.ValueTypeVector) (Vector, Annotations) === -func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take descending sort with NaN first and // reverse it. byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector))) @@ -507,7 +507,7 @@ func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) } // === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) === -func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { // NaN should sort to the bottom, so take ascending sort with NaN first and // reverse it. byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector))) @@ -516,7 +516,7 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel } // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === -func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { for _, label := range lbls { @@ -542,7 +542,7 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) 
(Vector, Annotations) === -func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { for _, label := range lbls { @@ -589,7 +589,7 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === -func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) minVal := vals[1].(Vector)[0].F maxVal := vals[2].(Vector)[0].F @@ -597,14 +597,14 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === -func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) maxVal := vals[1].(Vector)[0].F return clamp(vec, math.Inf(-1), maxVal, enh) } // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === -func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) minVal := vals[1].(Vector)[0].F return clamp(vec, minVal, math.Inf(+1), enh) @@ -641,7 +641,7 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // === 
Scalar(node parser.ValueTypeVector) Scalar === -func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var ( v = vals[0].(Vector) value float64 @@ -766,14 +766,14 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } // === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return aggrOverTime(vals, enh, func(s Series) float64 { return float64(len(s.Floats) + len(s.Histograms)) }), nil } // === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) === -func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { el := vals[0].(Matrix)[0] var f FPoint @@ -998,13 +998,13 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // This function will return 1 if the matrix has at least one element. // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. 
-func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: 1}), nil } // === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) === -func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return aggrOverTime(vals, enh, func(s Series) float64 { +func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return aggrOverTime(vals, enh, func(_ Series) float64 { return 1 }), nil } @@ -1026,126 +1026,126 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6 } // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Abs), nil } // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Ceil), nil } // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Floor), nil } // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func 
funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Exp), nil } // === sqrt(Vector VectorNode) (Vector, Annotations) === -func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Sqrt), nil } // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Log), nil } // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Log2), nil } // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Log10), nil } // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Sin), nil } // 
=== cos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Cos), nil } // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Tan), nil } // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Asin), nil } // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Acos), nil } // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Atan), nil } // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) 
(Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Sinh), nil } // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Cosh), nil } // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Tanh), nil } // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Asinh), nil } // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Acosh), nil } // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, math.Atanh), nil } // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, 
annotations.Annotations) { +func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 }), nil } // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi }), nil } // === pi() Scalar === -func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) { return Vector{Sample{F: math.Pi}}, nil } // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return simpleFunc(vals, enh, func(v float64) float64 { switch { case v < 0: @@ -1159,7 +1159,7 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) } // === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { if !enh.enableDelayedNameRemoval { @@ -1284,7 +1284,7 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo } // === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === -func 
funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1305,7 +1305,7 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN } // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1326,7 +1326,7 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod } // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1347,7 +1347,7 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod } // === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1390,7 +1390,7 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval } // === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramStdVar(vals 
[]parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { inVec := vals[0].(Vector) for _, sample := range inVec { @@ -1433,7 +1433,7 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval } // === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) === -func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHistogramFraction(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { lower := vals[0].(Vector)[0].F upper := vals[1].(Vector)[0].F inVec := vals[2].(Vector) @@ -1550,7 +1550,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev } // === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms resets := 0 @@ -1595,7 +1595,7 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } // === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { floats := vals[0].(Matrix)[0].Floats histograms := vals[0].(Matrix)[0].Histograms changes := 0 @@ -1683,7 +1683,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio } // === Vector(s Scalar) 
(Vector, Annotations) === -func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{ Metric: labels.Labels{}, @@ -1765,56 +1765,56 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo } // === days_in_month(v Vector) Scalar === -func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) }), nil } // === day_of_month(v Vector) Scalar === -func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Day()) }), nil } // === day_of_week(v Vector) Scalar === -func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Weekday()) }), nil } // === day_of_year(v Vector) Scalar === -func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.YearDay()) }), 
nil } // === hour(v Vector) Scalar === -func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Hour()) }), nil } // === minute(v Vector) Scalar === -func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Minute()) }), nil } // === month(v Vector) Scalar === -func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Month()) }), nil } // === year(v Vector) Scalar === -func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return dateWrapper(vals, enh, func(t time.Time) float64 { return float64(t.Year()) }), nil diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go index ea0e8b469f..3b99f6ea6f 100644 --- a/promql/histogram_stats_iterator_test.go +++ b/promql/histogram_stats_iterator_test.go @@ -173,7 +173,7 @@ func (h *histogramIterator) Next() chunkenc.ValueType { return chunkenc.ValNone } -func (h *histogramIterator) Seek(t int64) chunkenc.ValueType { panic("not implemented") } +func (h *histogramIterator) Seek(_ int64) chunkenc.ValueType { panic("not implemented") } func (h *histogramIterator) At() (int64, 
float64) { panic("not implemented") } diff --git a/promql/info.go b/promql/info.go index 3fe9a2ce99..0197330822 100644 --- a/promql/info.go +++ b/promql/info.go @@ -83,7 +83,7 @@ loop: func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints { var nodeTimestamp *int64 var offset int64 - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: if n.Timestamp != nil { diff --git a/rules/alerting_test.go b/rules/alerting_test.go index f7bdf4a955..9d8e10711b 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -109,7 +109,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) { NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2}, } - q := func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { + q := func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return []promql.Sample{{H: &h}}, nil } @@ -678,7 +678,7 @@ func TestQueryForStateSeries(t *testing.T) { tests := []testInput{ // Test for empty series. { - selectMockFunction: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + selectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { return storage.EmptySeriesSet() }, expectedSeries: nil, @@ -686,7 +686,7 @@ func TestQueryForStateSeries(t *testing.T) { }, // Test for error series. { - selectMockFunction: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + selectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { return storage.ErrSeriesSet(testError) }, expectedSeries: nil, @@ -694,7 +694,7 @@ func TestQueryForStateSeries(t *testing.T) { }, // Test for mock series. 
{ - selectMockFunction: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + selectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { return storage.TestSeriesSet(storage.MockSeries( []int64{1, 2, 3}, []float64{1, 2, 3}, @@ -989,7 +989,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) { true, promslog.NewNopLogger(), ) - _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { + _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, _ string, _ time.Time) (promql.Vector, error) { detail = FromOriginContext(ctx) return nil, nil }, nil, 0) diff --git a/rules/group.go b/rules/group.go index a83c33a8e8..c9f5162cda 100644 --- a/rules/group.go +++ b/rules/group.go @@ -1117,7 +1117,7 @@ func buildDependencyMap(rules []Rule) dependencyMap { break } - parser.Inspect(rule.Query(), func(node parser.Node, path []parser.Node) error { + parser.Inspect(rule.Query(), func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { // Find the name matcher for the rule. var nameMatcher *labels.Matcher diff --git a/rules/manager.go b/rules/manager.go index 6e96df2168..a38be82ebe 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -429,7 +429,7 @@ type Sender interface { // SendAlerts implements the rules.NotifyFunc for a Notifier. 
func SendAlerts(s Sender, externalURL string) NotifyFunc { - return func(ctx context.Context, expr string, alerts ...*Alert) { + return func(_ context.Context, expr string, alerts ...*Alert) { var res []*notifier.Alert for _, alert := range alerts { @@ -508,7 +508,7 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle } } -func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool { +func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool { return c.sema.TryAcquire(1) } @@ -561,7 +561,7 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) return false } -func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { +func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, _ *Group) []ConcurrentRules { return nil } diff --git a/rules/manager_test.go b/rules/manager_test.go index 9ef1fcf074..46a87787ce 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -375,7 +375,7 @@ func TestForStateRestore(t *testing.T) { Queryable: storage, Context: context.Background(), Logger: promslog.NewNopLogger(), - NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, + NotifyFunc: func(_ context.Context, _ string, _ ...*Alert) {}, OutageTolerance: 30 * time.Minute, ForGracePeriod: 10 * time.Minute, } @@ -917,7 +917,7 @@ func TestNotify(t *testing.T) { } engine := promqltest.NewTestEngineWithOpts(t, engineOpts) var lastNotified []*Alert - notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) { + notifyFunc := func(_ context.Context, _ string, alerts ...*Alert) { lastNotified = alerts } opts := &ManagerOptions{ @@ -1356,7 +1356,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { testValue = 3 } - skipEvalIterationFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { + skipEvalIterationFunc := func(_ context.Context, _ *Group, _ 
time.Time) { testValue = 4 } @@ -1395,7 +1395,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { Queryable: storage, Context: context.Background(), Logger: promslog.NewNopLogger(), - NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, + NotifyFunc: func(_ context.Context, _ string, _ ...*Alert) {}, OutageTolerance: 30 * time.Minute, ForGracePeriod: 10 * time.Minute, } @@ -1528,7 +1528,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci Context: context.Background(), Logger: promslog.NewNopLogger(), Appendable: storage, - QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, + QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return nil, nil }, }) t.Run("load a mix of dependent and independent rules", func(t *testing.T) { @@ -2282,7 +2282,7 @@ func TestNewRuleGroupRestoration(t *testing.T) { interval = 60 * time.Second ) - waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) { + waitForEvaluations := func(_ *testing.T, ch <-chan int32, targetCount int32) { for { select { case cnt := <-ch: @@ -2300,11 +2300,11 @@ func TestNewRuleGroupRestoration(t *testing.T) { option := optsFactory(store, &maxInflight, &inflightQueries, maxConcurrency) option.Queryable = store option.Appendable = store - option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {} + option.NotifyFunc = func(_ context.Context, _ string, _ ...*Alert) {} var evalCount atomic.Int32 ch := make(chan int32) - noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { + noopEvalIterFunc := func(_ context.Context, _ *Group, _ time.Time) { evalCount.Inc() ch <- evalCount.Load() } @@ -2345,7 +2345,7 @@ func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) { interval = 60 * time.Second ) - waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) { + 
waitForEvaluations := func(_ *testing.T, ch <-chan int32, targetCount int32) { for { select { case cnt := <-ch: @@ -2364,11 +2364,11 @@ func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) { option.Queryable = store option.Appendable = store option.RestoreNewRuleGroups = true - option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {} + option.NotifyFunc = func(_ context.Context, _ string, _ ...*Alert) {} var evalCount atomic.Int32 ch := make(chan int32) - noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) { + noopEvalIterFunc := func(_ context.Context, _ *Group, _ time.Time) { evalCount.Inc() ch <- evalCount.Load() } @@ -2510,7 +2510,7 @@ func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.I ConcurrentEvalsEnabled: concurrent, MaxConcurrentEvals: maxConcurrent, Appendable: storage, - QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { + QueryFunc: func(_ context.Context, _ string, ts time.Time) (promql.Vector, error) { inflightMu.Lock() current := inflightQueries.Add(1) @@ -2659,7 +2659,7 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) { Context: context.Background(), Logger: promslog.NewNopLogger(), Appendable: storage, - QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, + QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return nil, nil }, }) groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, tc.ruleFile) @@ -2688,7 +2688,7 @@ func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) { Context: context.Background(), Logger: promslog.NewNopLogger(), Appendable: storage, - QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, + QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return nil, nil }, }) 
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, "fixtures/rules_multiple.yaml") diff --git a/rules/recording_test.go b/rules/recording_test.go index 3fbf11c435..e7ec18d0d4 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -246,7 +246,7 @@ func TestRecordingEvalWithOrigin(t *testing.T) { require.NoError(t, err) rule := NewRecordingRule(name, expr, lbs) - _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { + _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, _ string, _ time.Time) (promql.Vector, error) { detail = FromOriginContext(ctx) return nil, nil }, nil, 0) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index 2719a467bc..f8e74aa48d 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -43,7 +43,7 @@ func (a nopAppendable) Appender(_ context.Context) storage.Appender { type nopAppender struct{} -func (a nopAppender) SetOptions(opts *storage.AppendOptions) {} +func (a nopAppender) SetOptions(_ *storage.AppendOptions) {} func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) { return 1, nil @@ -57,7 +57,7 @@ func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *h return 3, nil } -func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (a nopAppender) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, nil } @@ -137,7 +137,7 @@ type collectResultAppender struct { pendingMetadata []metadataEntry } -func (a *collectResultAppender) SetOptions(opts *storage.AppendOptions) {} +func (a *collectResultAppender) SetOptions(_ *storage.AppendOptions) {} func (a *collectResultAppender) Append(ref 
storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.mtx.Lock() @@ -184,7 +184,7 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels. return a.next.AppendHistogram(ref, l, t, h, fh) } -func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, _, ct int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { return a.AppendHistogram(ref, l, ct, &histogram.Histogram{}, nil) } @@ -205,7 +205,7 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L return a.next.UpdateMetadata(ref, l, m) } -func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { +func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, _, ct int64) (storage.SeriesRef, error) { return a.Append(ref, l, ct, 0.0) } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 75ac9ea692..96381fa736 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -479,7 +479,7 @@ func loadConfiguration(t testing.TB, c string) *config.Config { func noopLoop() loop { return &testLoop{ - startFunc: func(interval, timeout time.Duration, errc chan<- error) {}, + startFunc: func(_, _ time.Duration, _ chan<- error) {}, stopFunc: func() {}, } } @@ -730,7 +730,7 @@ func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server once := sync.Once{} server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fail := true once.Do(func() { fail = false @@ -972,7 +972,7 @@ func TestManagerCTZeroIngestionHistogram(t 
*testing.T) { once := sync.Once{} // Start fake HTTP target to that allow one scrape only. server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fail := true once.Do(func() { fail = false diff --git a/scrape/scrape.go b/scrape/scrape.go index 020bddb757..2cd3d78d53 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -456,7 +456,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { for _, t := range targets { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. nonEmpty := false - t.LabelsRange(func(l labels.Label) { nonEmpty = true }) + t.LabelsRange(func(_ labels.Label) { nonEmpty = true }) switch { case nonEmpty: all = append(all, t) @@ -820,7 +820,7 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { return s.client.Do(s.req.WithContext(ctx)) } -func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) { +func (s *targetScraper) readResponse(_ context.Context, resp *http.Response, w io.Writer) (string, error) { defer func() { io.Copy(io.Discard, resp.Body) resp.Body.Close() diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index b30dbfa1b9..ccc2351d97 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -120,7 +120,7 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde // Create an appender for adding samples to the storage. app := s.Appender(context.Background()) capp := &collectResultAppender{next: app} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0) // Current time for generating timestamps. 
now := time.Now() @@ -215,7 +215,7 @@ test_metric2{foo="bar"} 22 // Create an appender for adding samples to the storage. capp := &collectResultAppender{next: nopAppender{}} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -249,12 +249,12 @@ type nopScraper struct { scraper } -func (n nopScraper) Report(start time.Time, dur time.Duration, err error) {} +func (n nopScraper) Report(_ time.Time, _ time.Duration, _ error) {} func TestScrapeReportMetadataUpdate(t *testing.T) { // Create an appender for adding samples to the storage. capp := &collectResultAppender{next: nopAppender{}} - sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(ctx context.Context) storage.Appender { return capp }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(_ context.Context) storage.Appender { return capp }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -503,7 +503,7 @@ func TestScrapePoolReload(t *testing.T) { // equivalents have been stopped. 
newLoop := func(opts scrapeLoopOptions) loop { l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)} - l.startFunc = func(interval, timeout time.Duration, errc chan<- error) { + l.startFunc = func(interval, timeout time.Duration, _ chan<- error) { require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval") require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout") @@ -593,7 +593,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { } newLoop := func(opts scrapeLoopOptions) loop { l := &testLoop{interval: opts.interval, timeout: opts.timeout} - l.startFunc = func(interval, timeout time.Duration, errc chan<- error) { + l.startFunc = func(interval, timeout time.Duration, _ chan<- error) { require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval") require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout") } @@ -651,10 +651,10 @@ func TestScrapePoolTargetLimit(t *testing.T) { var wg sync.WaitGroup // On starting to run, new loops created on reload check whether their preceding // equivalents have been stopped. 
- newLoop := func(opts scrapeLoopOptions) loop { + newLoop := func(_ scrapeLoopOptions) loop { wg.Add(1) l := &testLoop{ - startFunc: func(interval, timeout time.Duration, errc chan<- error) { + startFunc: func(_, _ time.Duration, _ chan<- error) { wg.Done() }, stopFunc: func() {}, @@ -884,10 +884,10 @@ func TestScrapePoolRaces(t *testing.T) { func TestScrapePoolScrapeLoopsStarted(t *testing.T) { var wg sync.WaitGroup - newLoop := func(opts scrapeLoopOptions) loop { + newLoop := func(_ scrapeLoopOptions) loop { wg.Add(1) l := &testLoop{ - startFunc: func(interval, timeout time.Duration, errc chan<- error) { + startFunc: func(_, _ time.Duration, _ chan<- error) { wg.Done() }, stopFunc: func() {}, @@ -1022,7 +1022,7 @@ func TestScrapeLoopStop(t *testing.T) { signal = make(chan struct{}, 1) appender = &collectResultAppender{} scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } ) // Since we're writing samples directly below we need to provide a protocol fallback. 
@@ -1078,7 +1078,7 @@ func TestScrapeLoopRun(t *testing.T) { errc = make(chan error) scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return &nopAppender{} } + app = func(_ context.Context) storage.Appender { return &nopAppender{} } scrapeMetrics = newTestScrapeMetrics(t) ) @@ -1186,7 +1186,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { errc = make(chan error) scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return &nopAppender{} } + app = func(_ context.Context) storage.Appender { return &nopAppender{} } ) ctx, cancel := context.WithCancel(context.Background()) @@ -1235,7 +1235,7 @@ func TestScrapeLoopMetadata(t *testing.T) { nil, nil, nopMutator, nopMutator, - func(ctx context.Context) storage.Appender { return nopAppender{} }, + func(_ context.Context) storage.Appender { return nopAppender{} }, cache, labels.NewSymbolTable(), 0, @@ -1540,7 +1540,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) { ) sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ if numScrapes == cue { action(sl) @@ -1608,7 +1608,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { var ( signal = make(chan struct{}, 1) scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } ) ctx, cancel := context.WithCancel(context.Background()) @@ -1617,7 +1617,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { // Succeed once, several failures, then stop. 
numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ switch numScrapes { @@ -1654,7 +1654,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { var ( signal = make(chan struct{}, 1) scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } numScrapes = 0 ) @@ -1663,7 +1663,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain") // Succeed once, several failures, then stop. - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ switch numScrapes { case 1: @@ -1716,7 +1716,7 @@ func TestScrapeLoopCache(t *testing.T) { numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { switch numScrapes { case 1, 2: _, ok := sl.cache.series["metric_a"] @@ -1770,7 +1770,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { var ( signal = make(chan struct{}, 1) scraper = &testScraper{} - app = func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } ) ctx, cancel := context.WithCancel(context.Background()) @@ -1778,7 +1778,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ if numScrapes < 5 { s := "" @@ -1866,7 +1866,7 @@ func TestScrapeLoopAppend(t *testing.T) { labels: labels.FromStrings(test.discoveryLabels...), } - sl := newBasicScrapeLoop(t, 
context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.sampleMutator = func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil) } @@ -1954,7 +1954,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { for name, tc := range testcases { t.Run(name, func(t *testing.T) { app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.sampleMutator = func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil) } @@ -1978,7 +1978,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) { func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { // collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next. 
app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) fakeRef := storage.SeriesRef(1) expValue := float64(1) @@ -2016,7 +2016,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { resApp := &collectResultAppender{} app := &limitAppender{Appender: resApp, limit: 1} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.sampleMutator = func(l labels.Labels) labels.Labels { if l.Has("deleteme") { return labels.EmptyLabels() @@ -2075,7 +2075,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { resApp := &collectResultAppender{} app := &bucketLimitAppender{Appender: resApp, limit: 2} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.enableNativeHistogramIngestion = true sl.sampleMutator = func(l labels.Labels) labels.Labels { if l.Has("deleteme") { @@ -2187,7 +2187,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { defer s.Close() capp := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -2219,7 +2219,7 @@ func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) { app := &collectResultAppender{} // Explicitly setting the lack of fallback protocol here to make it obvious. 
- sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "") + sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0, "") now := time.Now() slApp := sl.appender(context.Background()) @@ -2233,7 +2233,7 @@ func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) { app := &collectResultAppender{} // Explicitly setting the lack of fallback protocol here to make it obvious. - sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "") + sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0, "") now := time.Now() slApp := sl.appender(context.Background()) @@ -2245,7 +2245,7 @@ func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) { func TestScrapeLoopAppendStaleness(t *testing.T) { app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -2275,7 +2275,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) now := time.Now() slApp := sl.appender(context.Background()) _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now) @@ -2299,7 +2299,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { func 
TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.trackTimestampsStaleness = true now := time.Now() @@ -2654,7 +2654,7 @@ metric: < labels: labels.FromStrings(test.discoveryLabels...), } - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion sl.sampleMutator = func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, discoveryLabels, false, nil) @@ -2738,7 +2738,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { app := &collectResultAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.sampleMutator = func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, discoveryLabels, false, nil) } @@ -2775,13 +2775,13 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { var ( scraper = &testScraper{} appender = &collectResultAppender{} - app = func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } ) ctx, cancel := context.WithCancel(context.Background()) sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, _ io.Writer) error { cancel() return errors.New("scrape failed") } @@ -2794,13 +2794,13 @@ 
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { var ( scraper = &testScraper{} appender = &collectResultAppender{} - app = func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } ) ctx, cancel := context.WithCancel(context.Background()) sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond) - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { cancel() w.Write([]byte("a{l=\"\xff\"} 1\n")) return nil @@ -2829,7 +2829,7 @@ func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t in func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) { app := &errorAppender{} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) now := time.Unix(1, 0) slApp := sl.appender(context.Background()) @@ -2853,7 +2853,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { app := &collectResultAppender{} sl := newBasicScrapeLoop(t, context.Background(), nil, - func(ctx context.Context) storage.Appender { + func(_ context.Context) storage.Appender { return &timeLimitAppender{ Appender: app, maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)), @@ -2965,7 +2965,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { block := make(chan struct{}) server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { <-block }), ) @@ -3021,7 +3021,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { func TestTargetScrapeScrapeNotFound(t *testing.T) { server := httptest.NewServer( - 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotFound) }), ) @@ -3057,7 +3057,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { ) var gzipResponse bool server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `text/plain; version=0.0.4`) if gzipResponse { w.Header().Set("Content-Encoding", "gzip") @@ -3147,11 +3147,11 @@ func (ts *testScraper) Report(start time.Time, duration time.Duration, err error ts.lastError = err } -func (ts *testScraper) scrape(ctx context.Context) (*http.Response, error) { +func (ts *testScraper) scrape(_ context.Context) (*http.Response, error) { return nil, ts.scrapeErr } -func (ts *testScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) { +func (ts *testScraper) readResponse(ctx context.Context, _ *http.Response, w io.Writer) (string, error) { if ts.scrapeFunc != nil { return "", ts.scrapeFunc(ctx, w) } @@ -3164,7 +3164,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { app := s.Appender(context.Background()) capp := &collectResultAppender{next: app} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0) now := time.Now() slApp := sl.appender(context.Background()) @@ -3190,7 +3190,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { capp := &collectResultAppender{next: app} - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0) sl.honorTimestamps = false now := time.Now() @@ 
-3253,7 +3253,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { app := s.Appender(context.Background()) ctx, cancel := context.WithCancel(context.Background()) - sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(_ context.Context) storage.Appender { return app }, 0) sl.sampleMutator = func(l labels.Labels) labels.Labels { if l.Has("drop") { return labels.FromStrings("no", "name") // This label set will trigger an error. @@ -3582,7 +3582,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ if numScrapes%4 == 0 { return errors.New("scrape failed") @@ -3795,7 +3795,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) { labels: labels.FromStrings(test.discoveryLabels...), } - sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0) sl.sampleMutator = func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, discoveryLabels, false, nil) } @@ -4393,7 +4393,7 @@ metric: < scrapeCount := 0 scraped := make(chan bool) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { if metricsText.contentType != "" { w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) for _, text := range metricsText.text { @@ -4493,7 +4493,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * var ( signal = make(chan struct{}, 1) scraper = &testScraper{} - app = 
func(ctx context.Context) storage.Appender { return appender } + app = func(_ context.Context) storage.Appender { return appender } ) ctx, cancel := context.WithCancel(context.Background()) @@ -4503,7 +4503,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * // Succeed once, several failures, then stop. numScrapes := 0 - scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { + scraper.scrapeFunc = func(_ context.Context, w io.Writer) error { numScrapes++ switch numScrapes { @@ -4830,7 +4830,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec buffer := protoMarshalDelimited(t, histogramMetricFamily) // Create a HTTP server to serve /metrics via ProtoBuf - metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) w.Write(buffer) })) @@ -5073,7 +5073,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha var scrapes int scrapedTwice = make(chan bool) - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, scrapeText) scrapes++ if scrapes == 2 { @@ -5085,7 +5085,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha // Regression test for the panic fixed in https://github.com/prometheus/prometheus/pull/15523. 
func TestScrapePoolScrapeAfterReload(t *testing.T) { h := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte{0x42, 0x42}) }, )) diff --git a/scrape/target_test.go b/scrape/target_test.go index 9dad18f01c..85f2280c41 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -147,7 +147,7 @@ func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Targe func TestNewHTTPBearerToken(t *testing.T) { server := httptest.NewServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(_ http.ResponseWriter, r *http.Request) { expected := "Bearer 1234" received := r.Header.Get("Authorization") require.Equal(t, expected, received, "Authorization header was not set correctly.") @@ -168,7 +168,7 @@ func TestNewHTTPBearerToken(t *testing.T) { func TestNewHTTPBearerTokenFile(t *testing.T) { server := httptest.NewServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(_ http.ResponseWriter, r *http.Request) { expected := "Bearer 12345" received := r.Header.Get("Authorization") require.Equal(t, expected, received, "Authorization header was not set correctly.") @@ -189,7 +189,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) { func TestNewHTTPBasicAuth(t *testing.T) { server := httptest.NewServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(_ http.ResponseWriter, r *http.Request) { username, password, ok := r.BasicAuth() require.True(t, ok, "Basic authorization header was not set correctly.") require.Equal(t, "user", username) @@ -214,7 +214,7 @@ func TestNewHTTPBasicAuth(t *testing.T) { func TestNewHTTPCACert(t *testing.T) { server := httptest.NewUnstartedServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `text/plain; version=0.0.4`) w.Write([]byte{}) }, @@ -238,7 +238,7 @@ func 
TestNewHTTPCACert(t *testing.T) { func TestNewHTTPClientCert(t *testing.T) { server := httptest.NewUnstartedServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `text/plain; version=0.0.4`) w.Write([]byte{}) }, @@ -267,7 +267,7 @@ func TestNewHTTPClientCert(t *testing.T) { func TestNewHTTPWithServerName(t *testing.T) { server := httptest.NewUnstartedServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `text/plain; version=0.0.4`) w.Write([]byte{}) }, @@ -292,7 +292,7 @@ func TestNewHTTPWithServerName(t *testing.T) { func TestNewHTTPWithBadServerName(t *testing.T) { server := httptest.NewUnstartedServer( http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", `text/plain; version=0.0.4`) w.Write([]byte{}) }, diff --git a/storage/merge.go b/storage/merge.go index 1953d5df09..bc70ceea55 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -133,7 +133,7 @@ func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { } // Select returns a set of series that matches the given label matchers. 
-func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { +func (q *mergeGenericQuerier) Select(ctx context.Context, _ bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) var limit int if hints != nil { diff --git a/storage/merge_test.go b/storage/merge_test.go index 92d951b839..2a4683bd0b 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -959,7 +959,7 @@ func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err} } -func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (m *mockQuerier) LabelValues(_ context.Context, name string, _ *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { m.mtx.Lock() m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ name: name, @@ -1409,7 +1409,7 @@ func BenchmarkMergeLabelValuesWithLimit(b *testing.B) { }, } - b.Run("benchmark", func(b *testing.B) { + b.Run("benchmark", func(_ *testing.B) { ctx := context.Background() hints := &LabelHints{ Limit: 1000, @@ -1696,7 +1696,7 @@ func (e errIterator) Next() chunkenc.ValueType { return chunkenc.ValNone } -func (e errIterator) Seek(t int64) chunkenc.ValueType { +func (e errIterator) Seek(_ int64) chunkenc.ValueType { return chunkenc.ValNone } diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index c8b3d487e7..9fd8680cde 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -61,7 +61,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) { for _, test := range tests { server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w 
http.ResponseWriter, _ *http.Request) { http.Error(w, longErrMessage, test.code) }), ) @@ -93,7 +93,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) { func TestClientRetryAfter(t *testing.T) { setupServer := func(statusCode int) *httptest.Server { return httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Retry-After", "5") http.Error(w, longErrMessage, statusCode) }), @@ -180,7 +180,7 @@ func TestClientCustomHeaders(t *testing.T) { var called bool server := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { called = true receivedHeaders := r.Header for name, value := range headersToSend { @@ -271,7 +271,7 @@ func TestReadClient(t *testing.T) { StartTimestampMs: 4000, EndTimestampMs: 12000, }, - httpHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") flusher, ok := w.(http.Flusher) @@ -324,7 +324,7 @@ func TestReadClient(t *testing.T) { }, { name: "unsupported content type", - httpHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "foobar") }), expectedErrorContains: "unsupported content type", @@ -399,7 +399,7 @@ func TestReadClient(t *testing.T) { } func sampledResponseHTTPHandler(t *testing.T) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/x-protobuf") resp := prompb.ReadResponse{ diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 
27d5aa47b6..7143059edf 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -761,7 +761,7 @@ func TestDisableReshardOnRetry(t *testing.T) { metrics = newQueueManagerMetrics(nil, "", "") client = &MockWriteClient{ - StoreFunc: func(ctx context.Context, b []byte, i int) (WriteResponseStats, error) { + StoreFunc: func(_ context.Context, _ []byte, _ int) (WriteResponseStats, error) { onStoreCalled() return WriteResponseStats{}, RecoverableError{ @@ -839,7 +839,7 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([ return samples, series } -func createProtoTimeseriesWithOld(numSamples, baseTs int64, extraLabels ...labels.Label) []prompb.TimeSeries { +func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) []prompb.TimeSeries { samples := make([]prompb.TimeSeries, numSamples) // use a fixed rand source so tests are consistent r := rand.New(rand.NewSource(99)) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 02585539c0..073ffed2e5 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -531,7 +531,7 @@ type OTLPOptions struct { // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. 
-func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler { ex := &rwExporter{ writeHandler: &writeHandler{ logger: logger, diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index b37b3632b9..5bf6f5632e 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -860,7 +860,7 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender { return m } -func (m *mockAppendable) SetOptions(opts *storage.AppendOptions) { +func (m *mockAppendable) SetOptions(_ *storage.AppendOptions) { panic("unimplemented") } @@ -956,7 +956,7 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t return 0, nil } -func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { +func (m *mockAppendable) AppendHistogramCTZeroSample(_ storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { if m.appendCTZeroSampleErr != nil { return 0, m.appendCTZeroSampleErr } @@ -1006,7 +1006,7 @@ func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp return 0, nil } -func (m *mockAppendable) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { +func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { if m.appendCTZeroSampleErr != nil { return 0, m.appendCTZeroSampleErr } diff --git a/storage/series.go b/storage/series.go index a3dbec7088..e61b225937 100644 --- a/storage/series.go +++ 
b/storage/series.go @@ -65,7 +65,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sampl if err != nil { return &ChunkSeriesEntry{ Lset: lset, - ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator { + ChunkIteratorFn: func(_ chunks.Iterator) chunks.Iterator { return errChunksIterator{err: err} }, } diff --git a/tsdb/block.go b/tsdb/block.go index 4ffd2463c3..ae20c174d6 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -656,7 +656,7 @@ Outer: func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) { numStones := 0 - if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error { + if err := pb.tombstones.Iter(func(_ storage.SeriesRef, ivs tombstones.Intervals) error { numStones += len(ivs) return nil }); err != nil { diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 3e25cff3da..776beb4396 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -602,7 +602,7 @@ func testPostingsForLabelMatching(t *testing.T, offset storage.SeriesRef, setUp { name: "missing label", labelName: "missing", - match: func(val string) bool { + match: func(_ string) bool { return true }, exp: nil, diff --git a/tsdb/chunks/chunk_write_queue_test.go b/tsdb/chunks/chunk_write_queue_test.go index c908d47f5b..a3be29140d 100644 --- a/tsdb/chunks/chunk_write_queue_test.go +++ b/tsdb/chunks/chunk_write_queue_test.go @@ -63,7 +63,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) { gotCutFile bool ) - blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error { + blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, _, cutFile bool) error { gotSeriesRef = seriesRef gotMint = mint gotMaxt = maxt @@ -82,7 +82,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) { ref := newChunkDiskMapperRef(321, 123) cutFile := true awaitCb := 
make(chan struct{}) - require.NoError(t, q.addJob(chunkWriteJob{seriesRef: seriesRef, mint: mint, maxt: maxt, chk: chunk, ref: ref, cutFile: cutFile, callback: func(err error) { + require.NoError(t, q.addJob(chunkWriteJob{seriesRef: seriesRef, mint: mint, maxt: maxt, chk: chunk, ref: ref, cutFile: cutFile, callback: func(_ error) { close(awaitCb) }})) <-awaitCb @@ -101,7 +101,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) { unblockChunkWriterCh := make(chan struct{}, sizeLimit) // blockingChunkWriter blocks until the unblockChunkWriterCh channel returns a value. - blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error { + blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error { <-unblockChunkWriterCh return nil } @@ -117,7 +117,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) { callbackWg.Add(1) require.NoError(t, q.addJob(chunkWriteJob{ ref: chunkRef, - callback: func(err error) { + callback: func(_ error) { callbackWg.Done() }, })) @@ -212,7 +212,7 @@ func BenchmarkChunkWriteQueue_addJob(b *testing.B) { for _, concurrentWrites := range []int{1, 10, 100, 1000} { b.Run(fmt.Sprintf("%d concurrent writes", concurrentWrites), func(b *testing.B) { issueReadSignal := make(chan struct{}) - q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, ooo, b bool) error { + q := newChunkWriteQueue(nil, 1000, func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error { if withReads { select { case issueReadSignal <- struct{}{}: diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index b2aa39c3b0..4c0b27e82f 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -155,7 +155,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t 
*testing.T) { hrw = createChunkDiskMapper(t, dir) idx := 0 - require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error { + require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, _, maxt int64, numSamples uint16, _ chunkenc.Encoding, isOOO bool) error { t.Helper() expData := expectedData[idx] @@ -574,7 +574,7 @@ func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSer if rand.Intn(2) == 0 { isOOO = true } - chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(cbErr error) { + chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(_ error) { require.NoError(t, err) close(awaitCb) }) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 826be61a42..ab2ece9e3b 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1012,7 +1012,7 @@ func TestWALFlushedOnDBClose(t *testing.T) { func TestWALSegmentSizeOptions(t *testing.T) { tests := map[int]func(dbdir string, segmentSize int){ // Default Wal Size. - 0: func(dbDir string, segmentSize int) { + 0: func(dbDir string, _ int) { filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal")) require.NoError(t, err) files := []os.FileInfo{} @@ -1051,7 +1051,7 @@ func TestWALSegmentSizeOptions(t *testing.T) { require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name()) }, // Wal disabled. - -1: func(dbDir string, segmentSize int) { + -1: func(dbDir string, _ int) { // Check that WAL dir is not there. _, err := os.Stat(filepath.Join(dbDir, "wal")) require.Error(t, err) @@ -1553,7 +1553,7 @@ func TestSizeRetention(t *testing.T) { // Create a WAL checkpoint, and compare sizes. 
first, last, err := wlog.Segments(db.Head().wal.Dir()) require.NoError(t, err) - _, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0) + _, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(_ chunks.HeadSeriesRef) bool { return false }, 0) require.NoError(t, err) blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics. walSize, err = db.Head().wal.Size() @@ -5549,7 +5549,7 @@ func TestQuerierOOOQuery(t *testing.T) { sampleFunc func(ts int64) chunks.Sample }{ "float": { - appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) { return app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts)) }, sampleFunc: func(ts int64) chunks.Sample { @@ -5582,7 +5582,7 @@ func TestQuerierOOOQuery(t *testing.T) { }, "integer histogram counter resets": { // Adding counter reset to all histograms means each histogram will have its own chunk. - appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) { h := tsdbutil.GenerateTestHistogram(ts) h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument. 
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) @@ -5610,7 +5610,7 @@ func testQuerierOOOQuery(t *testing.T, series1 := labels.FromStrings("foo", "bar1") type filterFunc func(t int64) bool - defaultFilterFunc := func(t int64) bool { return true } + defaultFilterFunc := func(_ int64) bool { return true } minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) { @@ -5865,7 +5865,7 @@ func TestChunkQuerierOOOQuery(t *testing.T) { checkInUseBucket bool }{ "float": { - appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) { return app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts)) }, sampleFunc: func(ts int64) chunks.Sample { @@ -5898,7 +5898,7 @@ func TestChunkQuerierOOOQuery(t *testing.T) { }, "integer histogram counter resets": { // Adding counter reset to all histograms means each histogram will have its own chunk. - appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) { h := tsdbutil.GenerateTestHistogram(ts) h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument. return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil) @@ -5909,7 +5909,7 @@ func TestChunkQuerierOOOQuery(t *testing.T) { }, "integer histogram with recode": { // Histograms have increasing number of buckets so their chunks are recoded. 
- appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) { + appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) { n := ts / time.Minute.Milliseconds() return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nBucketHistogram(n), nil) }, @@ -5941,7 +5941,7 @@ func testChunkQuerierOOOQuery(t *testing.T, series1 := labels.FromStrings("foo", "bar1") type filterFunc func(t int64) bool - defaultFilterFunc := func(t int64) bool { return true } + defaultFilterFunc := func(_ int64) bool { return true } minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) { @@ -6221,7 +6221,7 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() type resetFunc func(v int64) bool - defaultResetFunc := func(v int64) bool { return false } + defaultResetFunc := func(_ int64) bool { return false } lbls := labels.FromStrings("foo", "bar1") minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } @@ -9293,7 +9293,7 @@ func TestNewCompactorFunc(t *testing.T) { opts := DefaultOptions() block1 := ulid.MustNew(1, nil) block2 := ulid.MustNew(2, nil) - opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) { + opts.NewCompactorFunc = func(_ context.Context, _ prometheus.Registerer, _ *slog.Logger, _ []int64, _ chunkenc.Pool, _ *Options) (Compactor, error) { return &mockCompactorFn{ planFn: func() ([]string, error) { return []string{block1.String(), block2.String()}, nil diff --git a/tsdb/fileutil/dir.go b/tsdb/fileutil/dir.go index e6ac4ec989..1672a92d4c 100644 --- a/tsdb/fileutil/dir.go +++ b/tsdb/fileutil/dir.go @@ -20,7 +20,7 @@ 
import ( func DirSize(dir string) (int64, error) { var size int64 - err := filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error { + err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error { if err != nil { return err } diff --git a/tsdb/fileutil/fileutil.go b/tsdb/fileutil/fileutil.go index 5e479f48b9..523f99292c 100644 --- a/tsdb/fileutil/fileutil.go +++ b/tsdb/fileutil/fileutil.go @@ -76,7 +76,7 @@ func copyFile(src, dest string) error { func readDirs(src string) ([]string, error) { var files []string - err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error { + err := filepath.Walk(src, func(path string, _ os.FileInfo, _ error) error { relativePath := strings.TrimPrefix(path, src) if len(relativePath) > 0 { files = append(files, relativePath) diff --git a/tsdb/head_other.go b/tsdb/head_other.go index c73872c12e..45bb2285f0 100644 --- a/tsdb/head_other.go +++ b/tsdb/head_other.go @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. -func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { +func (h *Head) RebuildSymbolTable(_ *slog.Logger) *labels.SymbolTable { return nil } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 438c7e6a4d..f37fd17d60 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -489,7 +489,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi // oooChunk returns the chunk for the HeadChunkID by m-mapping it from the disk. // It never returns the head OOO chunk. 
-func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) { +func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, _ *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) { // ix represents the index of chunk in the s.ooo.oooMmappedChunks slice. The chunk id's are // incremented by 1 when new chunk is created, hence (id - firstOOOChunkID) gives the slice index. ix := int(id) - int(s.ooo.firstOOOChunkID) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index e498578c10..065e5ff008 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2500,7 +2500,7 @@ func TestMemSeriesIsolation(t *testing.T) { return i } - testIsolation := func(h *Head, i int) { + testIsolation := func(_ *Head, _ int) { } // Test isolation without restart of Head. @@ -5133,7 +5133,7 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) { var expOOOSamples []chunks.Sample l := labels.FromStrings("foo", "bar") - appendSample := func(mins int64, val float64, isOOO bool) { + appendSample := func(mins int64, _ float64, isOOO bool) { app := h.Appender(context.Background()) _, s, err := scenario.appendFunc(app, l, mins*time.Minute.Milliseconds(), mins) require.NoError(t, err) @@ -5896,7 +5896,7 @@ func TestCuttingNewHeadChunks(t *testing.T) { }{ "float samples": { numTotalSamples: 180, - floatValFunc: func(i int) float64 { + floatValFunc: func(_ int) float64 { return 1. 
}, expectedChks: []struct { diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 911b1a6ecc..42ecd7245d 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -1473,7 +1473,7 @@ func (r *Reader) Close() error { return r.c.Close() } -func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) { +func (r *Reader) lookupSymbol(_ context.Context, o uint32) (string, error) { if s, ok := r.nameSymbols[o]; ok { return s, nil } diff --git a/tsdb/isolation_test.go b/tsdb/isolation_test.go index 36083a1029..70404efbbf 100644 --- a/tsdb/isolation_test.go +++ b/tsdb/isolation_test.go @@ -72,7 +72,7 @@ func TestIsolation(t *testing.T) { func countOpenReads(iso *isolation) int { count := 0 - iso.TraverseOpenReads(func(s *isolationState) bool { + iso.TraverseOpenReads(func(_ *isolationState) bool { count++ return true }) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 2a1a44d18e..6ab99a96e6 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -484,15 +484,15 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks) } -func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { +func (ir *OOOCompactionHeadIndexReader) 
PostingsForMatchers(_ context.Context, _ bool, _ ...*labels.Matcher) (index.Postings, error) { return nil, errors.New("not implemented") } @@ -504,7 +504,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.S return "", errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/tsdb/querier.go b/tsdb/querier.go index ce99df6a13..676bcf4627 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er }, nil } -func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, err := q.index.SortedLabelValues(ctx, name, matchers...) return res, nil, err } -func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *blockBaseQuerier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, err := q.index.LabelNames(ctx, matchers...) return res, nil, err } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 9a7743ec12..56249d3bd6 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -104,7 +104,7 @@ type seriesSamples struct { // Index: labels -> postings -> chunkMetas -> chunkRef. // ChunkReader: ref -> vals. 
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) { - sort.Slice(tc, func(i, j int) bool { + sort.Slice(tc, func(i, _ int) bool { return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0 }) @@ -3318,7 +3318,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri return "", errors.New("label value for called") } -func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { +func (m mockMatcherIndex) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) { return nil, errors.New("label names for called") } @@ -3326,15 +3326,15 @@ func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Po return index.EmptyPostings(), nil } -func (m mockMatcherIndex) SortedPostings(p index.Postings) index.Postings { +func (m mockMatcherIndex) SortedPostings(_ index.Postings) index.Postings { return index.EmptyPostings() } -func (m mockMatcherIndex) ShardedPostings(ps index.Postings, shardIndex, shardCount uint64) index.Postings { +func (m mockMatcherIndex) ShardedPostings(ps index.Postings, _, _ uint64) index.Postings { return ps } -func (m mockMatcherIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { +func (m mockMatcherIndex) Series(_ storage.SeriesRef, _ *labels.ScratchBuilder, _ *[]chunks.Meta) error { return nil } @@ -3590,13 +3590,13 @@ func TestQueryWithDeletedHistograms(t *testing.T) { "intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { return tsdbutil.GenerateTestHistogram(int64(i)), nil }, - "intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { + "intgauge": func(_ int) (*histogram.Histogram, *histogram.FloatHistogram) { return tsdbutil.GenerateTestGaugeHistogram(rand.Int63() % 1000), nil }, "floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { return nil, 
tsdbutil.GenerateTestFloatHistogram(int64(i)) }, - "floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { + "floatGauge": func(_ int) (*histogram.Histogram, *histogram.FloatHistogram) { return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int63() % 1000) }, } diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 4d2a52b9af..692976cdf8 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -204,7 +204,7 @@ type Decoder struct { builder labels.ScratchBuilder } -func NewDecoder(t *labels.SymbolTable) Decoder { // FIXME remove t +func NewDecoder(_ *labels.SymbolTable) Decoder { // FIXME remove t return Decoder{builder: labels.NewScratchBuilder(0)} } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index a793c90a95..786912704e 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -399,7 +399,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { } } - Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0) + Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(_ chunks.HeadSeriesRef) bool { return true }, 0) w.Truncate(1) // Write more records after checkpointing. 
@@ -490,7 +490,7 @@ func TestReadCheckpoint(t *testing.T) { } _, err = w.NextSegmentSync() require.NoError(t, err) - _, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(_ chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) require.NoError(t, w.Truncate(32)) @@ -653,7 +653,7 @@ func TestCheckpointSeriesReset(t *testing.T) { return wt.checkNumSeries() == seriesCount }, 10*time.Second, 1*time.Second) - _, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0) + _, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(_ chunks.HeadSeriesRef) bool { return true }, 0) require.NoError(t, err) err = w.Truncate(5) diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go index fd3f1f66d4..212a08fa93 100644 --- a/util/httputil/compression_test.go +++ b/util/httputil/compression_test.go @@ -40,7 +40,7 @@ func setup() func() { } func getCompressionHandlerFunc() CompressionHandler { - hf := func(w http.ResponseWriter, r *http.Request) { + hf := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Write([]byte("Hello World!")) } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index fcf9c2a243..1ef22b40a1 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -314,7 +314,7 @@ func (m *rulesRetrieverMock) CreateRuleGroups() { Appendable: storage, Context: context.Background(), Logger: promslog.NewNopLogger(), - NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, + NotifyFunc: func(_ context.Context, _ string, _ ...*rules.Alert) {}, } var r []rules.Rule @@ -951,7 +951,7 @@ func TestStats(t *testing.T) { }, { name: "custom handler with known value", - renderer: func(ctx context.Context, s *stats.Statistics, p string) stats.QueryStats { + renderer: func(_ context.Context, _ *stats.Statistics, p 
string) stats.QueryStats { if p == "known" { return testStats{"Custom Value"} } @@ -4127,7 +4127,7 @@ func TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) { } func TestRespondError(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { api := API{} api.respondError(w, &apiError{errorTimeout, errors.New("message")}, "test") })) @@ -4723,11 +4723,11 @@ type fakeEngine struct { query fakeQuery } -func (e *fakeEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) { +func (e *fakeEngine) NewInstantQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _ time.Time) (promql.Query, error) { return &e.query, nil } -func (e *fakeEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) { +func (e *fakeEngine) NewRangeQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _, _ time.Time, _ time.Duration) (promql.Query, error) { return &e.query, nil } diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 1cc90a4b62..eb929c33ce 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -158,15 +158,15 @@ type errorTestQueryable struct { err error } -func (t errorTestQueryable) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { +func (t errorTestQueryable) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) { return nil, t.err } -func (t errorTestQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) { +func (t errorTestQueryable) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) { return nil, t.err } -func (t errorTestQueryable) Querier(mint, maxt int64) (storage.Querier, error) { +func (t 
errorTestQueryable) Querier(_, _ int64) (storage.Querier, error) { if t.q != nil { return t.q, nil } @@ -190,7 +190,7 @@ func (t errorTestQuerier) Close() error { return nil } -func (t errorTestQuerier) Select(_ context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { +func (t errorTestQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet { if t.s != nil { return t.s } diff --git a/web/web.go b/web/web.go index 246108e5b1..21624141ad 100644 --- a/web/web.go +++ b/web/web.go @@ -441,7 +441,7 @@ func New(logger *slog.Logger, o *Options) *Handler { router.Get("/consoles/*filepath", readyf(h.consoles)) - serveReactApp := func(w http.ResponseWriter, r *http.Request) { + serveReactApp := func(w http.ResponseWriter, _ *http.Request) { indexPath := reactAssetsRoot + "/index.html" f, err := ui.Assets.Open(indexPath) if err != nil { @@ -539,18 +539,18 @@ func New(logger *slog.Logger, o *Options) *Handler { router.Get("/debug/*subpath", serveDebug) router.Post("/debug/*subpath", serveDebug) - router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) { + router.Get("/-/healthy", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, "%s is Healthy.\n", o.AppName) }) router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }) - router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { + router.Get("/-/ready", readyf(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, "%s is Ready.\n", o.AppName) })) - router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) { + router.Head("/-/ready", readyf(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) })) @@ -896,7 +896,7 @@ func (h *Handler) consolesPath() string { } func setPathWithPrefix(prefix string) func(handlerName string, handler 
http.HandlerFunc) http.HandlerFunc { - return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc { + return func(_ string, handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { handler(w, r.WithContext(httputil.ContextWithPath(r.Context(), prefix+r.URL.Path))) } From d5b18be9e2116de19520881eaa41449221078c6c Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Wed, 19 Feb 2025 20:30:39 +0100 Subject: [PATCH 1381/1397] ci: fix build-all status trigger to contain always Otherwise a failure in the dependent job skips the status job, which is treated as success. Signed-off-by: Jan Fajerski --- .github/workflows/ci.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35a2651a0a..8b0857bf16 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -153,10 +153,21 @@ jobs: parallelism: 12 thread: ${{ matrix.thread }} build_all_status: + # This status check aggregates the individual matrix jobs of the "Build + # Prometheus for all architectures" step into a final status. Fails if a + # single matrix job fails, succeeds if all matrix jobs succeed. + # See https://github.com/orgs/community/discussions/4324 for why this is + # needed name: Report status of build Prometheus for all architectures runs-on: ubuntu-latest needs: [build_all] - if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-') + # The run condition needs to include always(). 
Otherwise actions + # behave unexpected: + # only "needs" will make the Status Report be skipped if one of the builds fails https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/using-jobs-in-a-workflow#defining-prerequisite-jobs + # And skipped is treated as success https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborat[…]n-repositories-with-code-quality-features/about-status-checks + # Adding always ensures that the status check is run independently of the + # results of Build All + if: always() && github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-') steps: - name: Successful build if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }} From 5868e36d91ed27a40561c47ac5482c0d2844dcf1 Mon Sep 17 00:00:00 2001 From: Matt Hughes Date: Thu, 20 Feb 2025 11:22:23 +0000 Subject: [PATCH 1382/1397] scraper: fix UTF-8 scraping header always sent with PrometheusText1.0.0 The `Accept` header should not include `escape=allow-utf-8` unless explicitly requested. Conveniently, there was already a test covering this header's value, it just required updating so it also asserts that this value in the header is not set in the cases we don't expect it to be set. I also converted those tests into table tests to help make failures clearer. 
Issue: https://github.com/prometheus/prometheus/issues/15857 Signed-off-by: Matt Hughes --- config/config.go | 2 +- scrape/scrape_test.go | 49 +++++++++++++++++++++++++++++++++++-------- 2 files changed, 41 insertions(+), 10 deletions(-) diff --git a/config/config.go b/config/config.go index 73282ac429..465affe082 100644 --- a/config/config.go +++ b/config/config.go @@ -523,7 +523,7 @@ var ( ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusText0_0_4: "text/plain;version=0.0.4", - PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8", + PrometheusText1_0_0: "text/plain;version=1.0.0", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index b30dbfa1b9..5d79650828 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2889,6 +2889,8 @@ func TestTargetScraperScrapeOK(t *testing.T) { accept := r.Header.Get("Accept") if allowUTF8 { require.Containsf(t, accept, "escaping=allow-utf-8", "Expected Accept header to allow utf8, got %q", accept) + } else { + require.NotContainsf(t, accept, "escaping=allow-utf-8", "Expected Accept header to not allow utf8, got %q", accept) } if protobufParsing { require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), @@ -2924,7 +2926,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { panic(err) } - runTest := func(acceptHeader string) { + runTest := func(t *testing.T, acceptHeader string) { ts := &targetScraper{ Target: &Target{ labels: labels.FromStrings( @@ -2951,14 +2953,43 @@ func TestTargetScraperScrapeOK(t *testing.T) { require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String()) } - runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation)) - protobufParsing = true - 
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation)) - protobufParsing = false - allowUTF8 = true - runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation)) - protobufParsing = true - runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation)) + for _, tc := range []struct { + scrapeProtocols []config.ScrapeProtocol + scheme model.ValidationScheme + protobufParsing bool + allowUTF8 bool + }{ + { + scrapeProtocols: config.DefaultScrapeProtocols, + scheme: model.LegacyValidation, + protobufParsing: false, + allowUTF8: false, + }, + { + scrapeProtocols: config.DefaultProtoFirstScrapeProtocols, + scheme: model.LegacyValidation, + protobufParsing: true, + allowUTF8: false, + }, + { + scrapeProtocols: config.DefaultScrapeProtocols, + scheme: model.UTF8Validation, + protobufParsing: false, + allowUTF8: true, + }, + { + scrapeProtocols: config.DefaultProtoFirstScrapeProtocols, + scheme: model.UTF8Validation, + protobufParsing: true, + allowUTF8: true, + }, + } { + t.Run(fmt.Sprintf("%+v", tc), func(t *testing.T) { + protobufParsing = tc.protobufParsing + allowUTF8 = tc.allowUTF8 + runTest(t, acceptHeader(tc.scrapeProtocols, tc.scheme)) + }) + } } func TestTargetScrapeScrapeCancel(t *testing.T) { From 8c8b83ed4ea947cb407b2103605fbe68254573df Mon Sep 17 00:00:00 2001 From: Ian Kerins Date: Thu, 20 Feb 2025 17:14:33 -0500 Subject: [PATCH 1383/1397] doc: fix broken kuma.io link (#16028) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit doc: fix broken kuma.io link I'm not actually familiar with kuma, but I noticed this link was broken, and I believe the one I've found here is equivalent. 
Signed-off-by: Ian Kerins Signed-off-by: Björn Rabenstein Co-authored-by: Björn Rabenstein --- docs/configuration/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index d2edae26bd..9b1514942a 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1915,7 +1915,7 @@ which automates the Prometheus setup on top of Kubernetes. Kuma SD configurations allow retrieving scrape target from the [Kuma](https://kuma.io) control plane. -This SD discovers "monitoring assignments" based on Kuma [Dataplane Proxies](https://kuma.io/docs/latest/documentation/dps-and-data-model), +This SD discovers "monitoring assignments" based on Kuma [Dataplane Proxies](https://kuma.io/docs/latest/production/dp-config/dpp/#data-plane-proxy), via the MADS v1 (Monitoring Assignment Discovery Service) xDS API, and will create a target for each proxy inside a Prometheus-enabled mesh. 
From 83569907099f43e2c1bf504ea5246be76144d722 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Fri, 21 Feb 2025 10:37:04 +1100 Subject: [PATCH 1384/1397] promql: fix double quoting in invalid label name error from `count_values` (#16054) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit promql: fix double quoting in invalid label name error from `count_values` --------- Signed-off-by: Charles Korn Signed-off-by: Charles Korn Co-authored-by: Björn Rabenstein --- promql/engine.go | 2 +- promql/promqltest/testdata/aggregators.test | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index f1904ec4ba..6ce0ebcb29 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1604,7 +1604,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if e.Op == parser.COUNT_VALUES { valueLabel := param.(*parser.StringLiteral) if !model.LabelName(valueLabel.Val).IsValid() { - ev.errorf("invalid label name %q", valueLabel) + ev.errorf("invalid label name %s", valueLabel) } if !e.Without { sortedGrouping = append(sortedGrouping, valueLabel.Val) diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 7479d2c3d3..1e3ab79a35 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -430,6 +430,10 @@ eval instant at 1m count_values by (job, group)("job", version) {job="7", group="canary"} 2 {job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2 +# Test an invalid label value. +eval_fail instant at 0 count_values("a\xc5z", version) + expected_fail_message invalid label name "a\xc5z" + # Tests for quantile. 
clear From b63604667228e08d4f733319b9e5a9f498c055be Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sat, 22 Feb 2025 21:31:24 +0100 Subject: [PATCH 1385/1397] Fix health color for target pools with single target that is down Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index 1e29c568e1..80eb58540c 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -54,7 +54,7 @@ type ScrapePools = { }; const poolPanelHealthClass = (pool: ScrapePool) => - pool.count > 1 && pool.downCount === pool.count + pool.count > 0 && pool.downCount === pool.count ? panelClasses.panelHealthErr : pool.downCount >= 1 ? panelClasses.panelHealthWarn From 528b3fc60fc3e06d27c02a59451a3ec969cd4a02 Mon Sep 17 00:00:00 2001 From: Matt Hughes Date: Thu, 20 Feb 2025 11:22:23 +0000 Subject: [PATCH 1386/1397] scraper: fix UTF-8 scraping header always sent with PrometheusText1.0.0 The `Accept` header should not include `escape=allow-utf-8` unless explicitly requested. Conveniently, there was already a test covering this header's value, it just required updating so it also asserts that this value in the header is not set in the cases we don't expect it to be set. I also converted those tests into table tests to help make failures clearer. 
Issue: https://github.com/prometheus/prometheus/issues/15857 Signed-off-by: Matt Hughes --- config/config.go | 2 +- scrape/scrape_test.go | 49 +++++++++++++++++++++++++++++++++++-------- 2 files changed, 41 insertions(+), 10 deletions(-) diff --git a/config/config.go b/config/config.go index 73282ac429..465affe082 100644 --- a/config/config.go +++ b/config/config.go @@ -523,7 +523,7 @@ var ( ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusText0_0_4: "text/plain;version=0.0.4", - PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8", + PrometheusText1_0_0: "text/plain;version=1.0.0", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 26117ffd08..75fed524c4 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -2825,6 +2825,8 @@ func TestTargetScraperScrapeOK(t *testing.T) { accept := r.Header.Get("Accept") if allowUTF8 { require.Containsf(t, accept, "escaping=allow-utf-8", "Expected Accept header to allow utf8, got %q", accept) + } else { + require.NotContainsf(t, accept, "escaping=allow-utf-8", "Expected Accept header to not allow utf8, got %q", accept) } if protobufParsing { require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), @@ -2860,7 +2862,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { panic(err) } - runTest := func(acceptHeader string) { + runTest := func(t *testing.T, acceptHeader string) { ts := &targetScraper{ Target: &Target{ labels: labels.FromStrings( @@ -2887,14 +2889,43 @@ func TestTargetScraperScrapeOK(t *testing.T) { require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String()) } - runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation)) - protobufParsing = true - 
runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation)) - protobufParsing = false - allowUTF8 = true - runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation)) - protobufParsing = true - runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation)) + for _, tc := range []struct { + scrapeProtocols []config.ScrapeProtocol + scheme model.ValidationScheme + protobufParsing bool + allowUTF8 bool + }{ + { + scrapeProtocols: config.DefaultScrapeProtocols, + scheme: model.LegacyValidation, + protobufParsing: false, + allowUTF8: false, + }, + { + scrapeProtocols: config.DefaultProtoFirstScrapeProtocols, + scheme: model.LegacyValidation, + protobufParsing: true, + allowUTF8: false, + }, + { + scrapeProtocols: config.DefaultScrapeProtocols, + scheme: model.UTF8Validation, + protobufParsing: false, + allowUTF8: true, + }, + { + scrapeProtocols: config.DefaultProtoFirstScrapeProtocols, + scheme: model.UTF8Validation, + protobufParsing: true, + allowUTF8: true, + }, + } { + t.Run(fmt.Sprintf("%+v", tc), func(t *testing.T) { + protobufParsing = tc.protobufParsing + allowUTF8 = tc.allowUTF8 + runTest(t, acceptHeader(tc.scrapeProtocols, tc.scheme)) + }) + } } func TestTargetScrapeScrapeCancel(t *testing.T) { From 6ec89f14c782dec97ef0179e284b569f4cfff70d Mon Sep 17 00:00:00 2001 From: boforetech Date: Mon, 24 Feb 2025 13:25:57 +0100 Subject: [PATCH 1387/1397] refactor: use a more straightforward return value (#16070) Signed-off-by: beforetech --- scrape/helpers_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index f8e74aa48d..fcef695385 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -159,7 +159,7 @@ func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels if err != nil { return 0, err } - return ref, err + return ref, nil } func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l 
labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { From 804c49d58f3f3784c77c9c8ec17c9062092cae27 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Tue, 25 Feb 2025 20:09:49 +0100 Subject: [PATCH 1388/1397] Prepare release 3.2.1 Signed-off-by: Jan Fajerski --- CHANGELOG.md | 3 +++ VERSION | 2 +- web/ui/mantine-ui/package.json | 4 ++-- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- 7 files changed, 17 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5cb893bab..89d540eaf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ ## unreleased +## 3.2.1 / 2025-02-25 + +* [BUGFIX] Don't send Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061 ## 3.2.0 / 2025-02-17 diff --git a/VERSION b/VERSION index 944880fa15..e4604e3afd 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.2.0 +3.2.1 diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json index 636f827c8a..aea0edf751 100644 --- a/web/ui/mantine-ui/package.json +++ b/web/ui/mantine-ui/package.json @@ -1,7 +1,7 @@ { "name": "@prometheus-io/mantine-ui", "private": true, - "version": "0.302.0", + "version": "0.302.1", "type": "module", "scripts": { "start": "vite", @@ -28,7 +28,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.302.0", + "@prometheus-io/codemirror-promql": "0.302.1", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 457f644905..ccc9da7471 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - 
"version": "0.302.0", + "version": "0.302.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.302.0", + "@prometheus-io/lezer-promql": "0.302.1", "lru-cache": "^11.0.2" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index a358151da1..b4f1a793a6 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.302.0", + "version": "0.302.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 1fffce00e7..8ec10d35c9 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.302.0", + "version": "0.302.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.302.0", + "version": "0.302.1", "workspaces": [ "mantine-ui", "module/*" @@ -24,7 +24,7 @@ }, "mantine-ui": { "name": "@prometheus-io/mantine-ui", - "version": "0.302.0", + "version": "0.302.1", "dependencies": { "@codemirror/autocomplete": "^6.18.4", "@codemirror/language": "^6.10.8", @@ -42,7 +42,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@nexucis/fuzzy": "^0.5.1", "@nexucis/kvsearch": "^0.9.1", - "@prometheus-io/codemirror-promql": "0.302.0", + "@prometheus-io/codemirror-promql": "0.302.1", "@reduxjs/toolkit": "^2.5.0", "@tabler/icons-react": "^3.28.1", "@tanstack/react-query": "^5.62.11", @@ -146,10 +146,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.302.0", + "version": "0.302.1", "license": 
"Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.302.0", + "@prometheus-io/lezer-promql": "0.302.1", "lru-cache": "^11.0.2" }, "devDependencies": { @@ -179,7 +179,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.302.0", + "version": "0.302.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.7.2", diff --git a/web/ui/package.json b/web/ui/package.json index 4af8447049..3e58725f57 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -1,7 +1,7 @@ { "name": "prometheus-io", "description": "Monorepo for the Prometheus UI", - "version": "0.302.0", + "version": "0.302.1", "private": true, "scripts": { "build": "bash build_ui.sh --all", From 78599d0ec61f49f6d2bdd7db8df5c0c917e2a405 Mon Sep 17 00:00:00 2001 From: Justin Cichra Date: Wed, 26 Feb 2025 09:05:40 -0500 Subject: [PATCH 1389/1397] notifier: Consider alert relabeling when deciding if alerts are dropped When intentionally dropping all alerts in a batch, `SendAll` returns false, increasing the dropped_total metric. This makes it difficult to tell if there is a connection issue between Prometheus and AlertManager or a result of intentionally dropping alerts. Have `SendAll` return `true` when no batches were sent by keeping track of the number of AlertManager request attempts. If no attempts were made, then the send is successful. 
Fixes: #15978 Signed-off-by: Justin Cichra --- notifier/notifier.go | 32 +++++++++++++++------ notifier/notifier_test.go | 60 +++++++++++++++++++++++++++++++++------ 2 files changed, 74 insertions(+), 18 deletions(-) diff --git a/notifier/notifier.go b/notifier/notifier.go index fbc37c29ef..153c1039f8 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -36,7 +36,6 @@ import ( "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/sigv4" - "go.uber.org/atomic" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -552,10 +551,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { n.mtx.RUnlock() var ( - wg sync.WaitGroup - numSuccess atomic.Uint64 + wg sync.WaitGroup + amSetCovered sync.Map ) - for _, ams := range amSets { + for k, ams := range amSets { var ( payload []byte err error @@ -611,24 +610,28 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { cachedPayload = nil } + // Being here means len(ams.ams) > 0 + amSetCovered.Store(k, false) for _, am := range ams.ams { wg.Add(1) ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout)) defer cancel() - go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { - if err := n.sendOne(ctx, client, url, payload); err != nil { + go func(ctx context.Context, k string, client *http.Client, url string, payload []byte, count int) { + err := n.sendOne(ctx, client, url, payload) + if err != nil { n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err) n.metrics.errors.WithLabelValues(url).Add(float64(count)) } else { - numSuccess.Inc() + amSetCovered.CompareAndSwap(k, false, true) } + n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds()) n.metrics.sent.WithLabelValues(url).Add(float64(count)) wg.Done() - }(ctx, ams.client, am.url().String(), payload, len(amAlerts)) + }(ctx, k, ams.client, am.url().String(), payload, len(amAlerts)) } 
ams.mtx.RUnlock() @@ -636,7 +639,18 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { wg.Wait() - return numSuccess.Load() > 0 + // Return false if there are any sets which were attempted (e.g. not filtered + // out) but have no successes. + allAmSetsCovered := true + amSetCovered.Range(func(_, value any) bool { + if !value.(bool) { + allAmSetsCovered = false + return false + } + return true + }) + + return allAmSetsCovered } func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 97b0274f29..5565aada72 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -140,17 +140,20 @@ func newTestHTTPServerBuilder(expected *[]*Alert, errc chan<- error, u, p string func TestHandlerSendAll(t *testing.T) { var ( - errc = make(chan error, 1) - expected = make([]*Alert, 0, maxBatchSize) - status1, status2 atomic.Int32 + errc = make(chan error, 1) + expected = make([]*Alert, 0, maxBatchSize) + status1, status2, status3 atomic.Int32 ) status1.Store(int32(http.StatusOK)) status2.Store(int32(http.StatusOK)) + status3.Store(int32(http.StatusOK)) server1 := newTestHTTPServerBuilder(&expected, errc, "prometheus", "testing_password", &status1) server2 := newTestHTTPServerBuilder(&expected, errc, "", "", &status2) + server3 := newTestHTTPServerBuilder(&expected, errc, "", "", &status3) defer server1.Close() defer server2.Close() + defer server3.Close() h := NewManager(&Options{}, nil) @@ -170,6 +173,9 @@ func TestHandlerSendAll(t *testing.T) { am2Cfg := config.DefaultAlertmanagerConfig am2Cfg.Timeout = model.Duration(time.Second) + am3Cfg := config.DefaultAlertmanagerConfig + am3Cfg.Timeout = model.Duration(time.Second) + h.alertmanagers["1"] = &alertmanagerSet{ ams: []alertmanager{ alertmanagerMock{ @@ -185,10 +191,18 @@ func TestHandlerSendAll(t *testing.T) { alertmanagerMock{ urlf: func() string { return server2.URL }, }, + alertmanagerMock{ + urlf: func() string { return 
server3.URL }, + }, }, cfg: &am2Cfg, } + h.alertmanagers["3"] = &alertmanagerSet{ + ams: []alertmanager{}, // empty set + cfg: &am3Cfg, + } + for i := range make([]struct{}, maxBatchSize) { h.queue = append(h.queue, &Alert{ Labels: labels.FromStrings("alertname", strconv.Itoa(i)), @@ -207,14 +221,25 @@ func TestHandlerSendAll(t *testing.T) { } } + // all ams in all sets are up require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly") checkNoErr() + // the only am in set 1 is down status1.Store(int32(http.StatusNotFound)) - require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly") + require.False(t, h.sendAll(h.queue...), "all sends failed unexpectedly") checkNoErr() + // reset it + status1.Store(int32(http.StatusOK)) + + // only one of the ams in set 2 is down status2.Store(int32(http.StatusInternalServerError)) + require.True(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly") + checkNoErr() + + // both ams in set 2 are down + status3.Store(int32(http.StatusInternalServerError)) require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly") checkNoErr() } @@ -226,13 +251,15 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) { expected2 = make([]*Alert, 0, maxBatchSize) expected3 = make([]*Alert, 0) - statusOK atomic.Int32 + status1, status2, status3 atomic.Int32 ) - statusOK.Store(int32(http.StatusOK)) + status1.Store(int32(http.StatusOK)) + status2.Store(int32(http.StatusOK)) + status3.Store(int32(http.StatusOK)) - server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &statusOK) - server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &statusOK) - server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &statusOK) + server1 := newTestHTTPServerBuilder(&expected1, errc, "", "", &status1) + server2 := newTestHTTPServerBuilder(&expected2, errc, "", "", &status2) + server3 := newTestHTTPServerBuilder(&expected3, errc, "", "", &status3) defer server1.Close() defer server2.Close() @@ -331,6 +358,21 
@@ func TestHandlerSendAllRemapPerAm(t *testing.T) { } } + // all ams are up + require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly") + checkNoErr() + + // the only am in set 1 goes down + status1.Store(int32(http.StatusInternalServerError)) + require.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly") + checkNoErr() + + // reset set 1 + status1.Store(int32(http.StatusOK)) + + // set 3 loses its only am, but all alerts were dropped + // so there was nothing to send, keeping sendAll true + status3.Store(int32(http.StatusInternalServerError)) require.True(t, h.sendAll(h.queue...), "all sends failed unexpectedly") checkNoErr() From efa4f4a60bee54d5263c24221bbbc4cddf0a5d6a Mon Sep 17 00:00:00 2001 From: Sebastian Schubert Date: Wed, 26 Feb 2025 17:57:44 +0100 Subject: [PATCH 1390/1397] doc: clarified behaviour of source_labels in relabel_config Signed-off-by: Sebastian Schubert --- docs/configuration/configuration.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 9b1514942a..3550094ff2 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2551,7 +2551,8 @@ input to a subsequent relabeling step), use the `__tmp` label name prefix. This prefix is guaranteed to never be used by Prometheus itself. ```yaml -# The source labels select values from existing labels. Their content is concatenated +# The source_labels tells the rule what labels to fetch from the series. Any +# labels which do not exist get a blank value (""). Their content is concatenated # using the configured separator and matched against the configured regular expression # for the replace, keep, and drop actions. [ source_labels: '[' [, ...] 
']' ] From 4b01140207d656b60e9e59920db8b04e83774f1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?= Date: Wed, 26 Feb 2025 18:20:42 +0100 Subject: [PATCH 1391/1397] doc: Update nodejs and npm requirements for build (#16082) Signed-off-by: beorn7 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 658cee4640..a027328e20 100644 --- a/README.md +++ b/README.md @@ -68,8 +68,8 @@ Prometheus will now be reachable at . To build Prometheus from source code, You need: * Go [version 1.22 or greater](https://golang.org/doc/install). -* NodeJS [version 16 or greater](https://nodejs.org/). -* npm [version 7 or greater](https://www.npmjs.com/). +* NodeJS [version 22 or greater](https://nodejs.org/). +* npm [version 8 or greater](https://www.npmjs.com/). Start by cloning the repository: From 2fb6697eeaf198960d7b6cae48c8b91c218e2497 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 27 Feb 2025 10:02:23 +0100 Subject: [PATCH 1392/1397] Targets API: Remove superfluous if() and make variable names more intuitive Signed-off-by: Julius Volz --- web/api/v1/api.go | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 1b8798feb8..36ddcaa8b8 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1105,15 +1105,15 @@ func (api *API) scrapePools(r *http.Request) apiFuncResult { } func (api *API) targets(r *http.Request) apiFuncResult { - sortKeys := func(targets map[string][]*scrape.Target) ([]string, int) { + getSortedPools := func(targets map[string][]*scrape.Target) ([]string, int) { var n int - keys := make([]string, 0, len(targets)) - for k := range targets { - keys = append(keys, k) - n += len(targets[k]) + pools := make([]string, 0, len(targets)) + for p := range targets { + pools = append(pools, p) + n += len(targets[p]) } - slices.Sort(keys) - return keys, n + slices.Sort(pools) + return pools, n } 
scrapePool := r.URL.Query().Get("scrapePool") @@ -1125,14 +1125,14 @@ func (api *API) targets(r *http.Request) apiFuncResult { if showActive { targetsActive := api.targetRetriever(r.Context()).TargetsActive() - activeKeys, numTargets := sortKeys(targetsActive) + activePools, numTargets := getSortedPools(targetsActive) res.ActiveTargets = make([]*Target, 0, numTargets) - for _, key := range activeKeys { - if scrapePool != "" && key != scrapePool { + for _, p := range activePools { + if scrapePool != "" && p != scrapePool { continue } - for _, target := range targetsActive[key] { + for _, target := range targetsActive[p] { lastErrStr := "" lastErr := target.LastError() if lastErr != nil { @@ -1144,7 +1144,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { res.ActiveTargets = append(res.ActiveTargets, &Target{ DiscoveredLabels: target.DiscoveredLabels(builder), Labels: target.Labels(builder), - ScrapePool: key, + ScrapePool: p, ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), LastError: func() string { @@ -1170,16 +1170,15 @@ func (api *API) targets(r *http.Request) apiFuncResult { } if showDropped { res.DroppedTargetCounts = api.targetRetriever(r.Context()).TargetsDroppedCounts() - } - if showDropped { + targetsDropped := api.targetRetriever(r.Context()).TargetsDropped() - droppedKeys, numTargets := sortKeys(targetsDropped) + droppedPools, numTargets := getSortedPools(targetsDropped) res.DroppedTargets = make([]*DroppedTarget, 0, numTargets) - for _, key := range droppedKeys { - if scrapePool != "" && key != scrapePool { + for _, p := range droppedPools { + if scrapePool != "" && p != scrapePool { continue } - for _, target := range targetsDropped[key] { + for _, target := range targetsDropped[p] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ DiscoveredLabels: target.DiscoveredLabels(builder), }) From 5efaa84be1ea0ece30fddf0bc10cf4eae69bb50e Mon Sep 17 00:00:00 2001 From: Danial Eskandari 
<52372842+eskandaridanial@users.noreply.github.com> Date: Thu, 27 Feb 2025 13:55:52 +0330 Subject: [PATCH 1393/1397] config: set gogc default value when config body is empty (#16052) * fix: set gogc default value when config body is empty Signed-off-by: Danial Eskandari * refactor: explicitly check value 75 in `TestGoGCDefaultValueOnEmptyConfigBody` add GoGC assertion in `TestEmptyConfig`, also removed the no longer needed runtime config assignment in `TestEmptyGlobalBlock` Signed-off-by: Danial Eskandari * refactor: remove `TestGoGCDefaultValueOnEmptyConfigBody` to reduce duplicate assertions Signed-off-by: Danial Eskandari --------- Signed-off-by: Danial Eskandari --- config/config.go | 1 + config/config_test.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index 465affe082..a395c3f53e 100644 --- a/config/config.go +++ b/config/config.go @@ -156,6 +156,7 @@ var ( // DefaultConfig is the default top-level configuration. DefaultConfig = Config{ GlobalConfig: DefaultGlobalConfig, + Runtime: DefaultRuntimeConfig, } // DefaultGlobalConfig is the default global configuration. 
diff --git a/config/config_test.go b/config/config_test.go index 437b858b00..faca7dda12 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2222,6 +2222,7 @@ func TestEmptyConfig(t *testing.T) { exp := DefaultConfig exp.loaded = true require.Equal(t, exp, *c) + require.Equal(t, 75, c.Runtime.GoGC) } func TestExpandExternalLabels(t *testing.T) { @@ -2269,7 +2270,6 @@ func TestEmptyGlobalBlock(t *testing.T) { c, err := Load("global:\n", promslog.NewNopLogger()) require.NoError(t, err) exp := DefaultConfig - exp.Runtime = DefaultRuntimeConfig exp.loaded = true require.Equal(t, exp, *c) } From 911e305bc94812d55643c9b9c267b2c73c7cdf98 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 27 Feb 2025 13:06:22 +0100 Subject: [PATCH 1394/1397] Address review comments Signed-off-by: Julius Volz --- web/api/v1/api.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 36ddcaa8b8..519433a78d 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1108,9 +1108,9 @@ func (api *API) targets(r *http.Request) apiFuncResult { getSortedPools := func(targets map[string][]*scrape.Target) ([]string, int) { var n int pools := make([]string, 0, len(targets)) - for p := range targets { + for p, t := range targets { pools = append(pools, p) - n += len(targets[p]) + n += len(t) } slices.Sort(pools) return pools, n @@ -1128,11 +1128,11 @@ func (api *API) targets(r *http.Request) apiFuncResult { activePools, numTargets := getSortedPools(targetsActive) res.ActiveTargets = make([]*Target, 0, numTargets) - for _, p := range activePools { - if scrapePool != "" && p != scrapePool { + for _, pool := range activePools { + if scrapePool != "" && pool != scrapePool { continue } - for _, target := range targetsActive[p] { + for _, target := range targetsActive[pool] { lastErrStr := "" lastErr := target.LastError() if lastErr != nil { @@ -1144,7 +1144,7 @@ func (api *API) targets(r *http.Request) apiFuncResult 
{ res.ActiveTargets = append(res.ActiveTargets, &Target{ DiscoveredLabels: target.DiscoveredLabels(builder), Labels: target.Labels(builder), - ScrapePool: p, + ScrapePool: pool, ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), LastError: func() string { @@ -1174,11 +1174,11 @@ func (api *API) targets(r *http.Request) apiFuncResult { targetsDropped := api.targetRetriever(r.Context()).TargetsDropped() droppedPools, numTargets := getSortedPools(targetsDropped) res.DroppedTargets = make([]*DroppedTarget, 0, numTargets) - for _, p := range droppedPools { - if scrapePool != "" && p != scrapePool { + for _, pool := range droppedPools { + if scrapePool != "" && pool != scrapePool { continue } - for _, target := range targetsDropped[p] { + for _, target := range targetsDropped[pool] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ DiscoveredLabels: target.DiscoveredLabels(builder), }) From 4be2243f063c7545c2debdc0323c907e2911aec0 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Thu, 27 Feb 2025 10:06:11 +0100 Subject: [PATCH 1395/1397] Include scrape pool name for dropped targets in /api/v1/targets This is needed so we can display dropped targets in the right scrape pool in the web interface even if they have a non-default job name. 
See also https://github.com/prometheus/prometheus/pull/16078 Fixes https://github.com/prometheus/prometheus/issues/16065 Signed-off-by: Julius Volz --- docs/querying/api.md | 1 + web/api/v1/api.go | 2 ++ web/api/v1/api_test.go | 3 +++ web/ui/mantine-ui/src/api/responseTypes/targets.ts | 1 + .../pages/service-discovery/ServiceDiscoveryPoolsList.tsx | 5 ++--- 5 files changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 1366dc02c2..10f963f209 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -692,6 +692,7 @@ $ curl http://localhost:9090/api/v1/targets "__scrape_timeout__": "10s", "job": "node" }, + "scrapePool": "node" } ] } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 519433a78d..82aff1c940 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -1021,6 +1021,7 @@ type ScrapePoolsDiscovery struct { type DroppedTarget struct { // Labels before any processing. DiscoveredLabels labels.Labels `json:"discoveredLabels"` + ScrapePool string `json:"scrapePool"` } // TargetDiscovery has all the active targets. 
@@ -1181,6 +1182,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { for _, target := range targetsDropped[pool] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ DiscoveredLabels: target.DiscoveredLabels(builder), + ScrapePool: pool, }) } } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 1ef22b40a1..98c0e741a7 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -1767,6 +1767,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "__scrape_interval__", "30s", "__scrape_timeout__", "15s", ), + ScrapePool: "blackbox", }, }, DroppedTargetCounts: map[string]int{"blackbox": 1}, @@ -1816,6 +1817,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "__scrape_interval__", "30s", "__scrape_timeout__", "15s", ), + ScrapePool: "blackbox", }, }, DroppedTargetCounts: map[string]int{"blackbox": 1}, @@ -1875,6 +1877,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "__scrape_interval__", "30s", "__scrape_timeout__", "15s", ), + ScrapePool: "blackbox", }, }, DroppedTargetCounts: map[string]int{"blackbox": 1}, diff --git a/web/ui/mantine-ui/src/api/responseTypes/targets.ts b/web/ui/mantine-ui/src/api/responseTypes/targets.ts index cc0e891f05..c054247284 100644 --- a/web/ui/mantine-ui/src/api/responseTypes/targets.ts +++ b/web/ui/mantine-ui/src/api/responseTypes/targets.ts @@ -18,6 +18,7 @@ export type Target = { export interface DroppedTarget { discoveredLabels: Labels; + scrapePool: string; } // Result type for /api/v1/targets endpoint. 
diff --git a/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx b/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx index 798783da2f..6aec62fcc5 100644 --- a/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/service-discovery/ServiceDiscoveryPoolsList.tsx @@ -109,8 +109,7 @@ const buildPoolsData = ( } for (const target of droppedTargets) { - const { job: poolName } = target.discoveredLabels; - const pool = pools[poolName]; + const pool = pools[target.scrapePool]; if (!pool) { // TODO: Should we do better here? throw new Error( @@ -131,7 +130,7 @@ const buildPoolsData = ( .map((value) => value.original); for (const target of filteredDroppedTargets) { - pools[target.discoveredLabels.job].targets.push({ + pools[target.scrapePool].targets.push({ discoveredLabels: target.discoveredLabels, isDropped: true, labels: {}, From 0e4e5a71bd006edf56362b903bfdf480edbf2f26 Mon Sep 17 00:00:00 2001 From: co63oc Date: Fri, 28 Feb 2025 08:24:25 +0800 Subject: [PATCH 1396/1397] Fix typos (#16076) Signed-off-by: co63oc --- cmd/promtool/main_test.go | 4 ++-- ...n.nonexistant => bad-sd-file-extension.nonexistent} | 0 discovery/aws/ec2_test.go | 4 ++-- docs/configuration/alerting_rules.md | 2 +- docs/migration.md | 4 ++-- docs/querying/api.md | 2 +- promql/promqltest/testdata/functions.test | 10 +++++----- storage/remote/write_handler.go | 2 +- 8 files changed, 14 insertions(+), 14 deletions(-) rename cmd/promtool/testdata/{bad-sd-file-extension.nonexistant => bad-sd-file-extension.nonexistent} (100%) diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 48bed9a2df..5d36a66fcd 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -136,8 +136,8 @@ func TestCheckSDFile(t *testing.T) { }, { name: "bad file extension", - file: "./testdata/bad-sd-file-extension.nonexistant", - err: "invalid file extension: \".nonexistant\"", + file: 
"./testdata/bad-sd-file-extension.nonexistent", + err: "invalid file extension: \".nonexistent\"", }, { name: "bad format", diff --git a/cmd/promtool/testdata/bad-sd-file-extension.nonexistant b/cmd/promtool/testdata/bad-sd-file-extension.nonexistent similarity index 100% rename from cmd/promtool/testdata/bad-sd-file-extension.nonexistant rename to cmd/promtool/testdata/bad-sd-file-extension.nonexistent diff --git a/discovery/aws/ec2_test.go b/discovery/aws/ec2_test.go index 888816b4e2..2955e0e02e 100644 --- a/discovery/aws/ec2_test.go +++ b/discovery/aws/ec2_test.go @@ -217,7 +217,7 @@ func TestEC2DiscoveryRefresh(t *testing.T) { State: &ec2.InstanceState{Name: strptr("running")}, SubnetId: strptr("azid-3"), VpcId: strptr("vpc-ipv4"), - // network intefaces + // network interfaces NetworkInterfaces: []*ec2.InstanceNetworkInterface{ // interface without subnet -> should be ignored { @@ -285,7 +285,7 @@ func TestEC2DiscoveryRefresh(t *testing.T) { State: &ec2.InstanceState{Name: strptr("running")}, SubnetId: strptr("azid-2"), VpcId: strptr("vpc-ipv6"), - // network intefaces + // network interfaces NetworkInterfaces: []*ec2.InstanceNetworkInterface{ // interface without primary IPv6, index 2 { diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md index cd33dba8e3..0a442876c3 100644 --- a/docs/configuration/alerting_rules.md +++ b/docs/configuration/alerting_rules.md @@ -46,7 +46,7 @@ this alert firing for the specified duration after the firing condition was last This can be used to prevent situations such as flapping alerts, false resolutions due to lack of data loss, etc. Alerting rules without the `keep_firing_for` clause will deactivate on the first evaluation where the condition is not met (assuming -any optional `for` duration desribed above has been satisfied). +any optional `for` duration described above has been satisfied). The `labels` clause allows specifying a set of additional labels to be attached to the alert. 
Any existing conflicting labels will be overwritten. The label diff --git a/docs/migration.md b/docs/migration.md index 34dae93e85..44cd466b3d 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -26,7 +26,7 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0 - Prometheus v3 will no longer add ports to scrape targets according to the specified scheme. Target will now appear in labels as configured. - If you rely on scrape targets like - `https://example.com/metrics` or `http://exmaple.com/metrics` to be + `https://example.com/metrics` or `http://example.com/metrics` to be represented as `https://example.com/metrics:443` and `http://example.com/metrics:80` respectively, add them to your target URLs - `agent` @@ -139,7 +139,7 @@ may now fail if this fallback protocol is not specified. The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes to the index format. Consequently, a Prometheus v3 TSDB can only be read by a Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will be only -able to downgrade to v2.55, not lower, without losing your TSDB persitent data. +able to downgrade to v2.55, not lower, without losing your TSDB persistent data. As an extra safety measure, you could optionally consider upgrading to v2.55 first and confirm Prometheus works as expected, before upgrading to v3. diff --git a/docs/querying/api.md b/docs/querying/api.md index 10f963f209..033a2dfcf5 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -45,7 +45,7 @@ The JSON response envelope format is as follows: // Only set if there were warnings while executing the request. // There will still be data in the data field. "warnings": [""], - // Only set if there were info-level annnotations while executing the request. + // Only set if there were info-level annotations while executing the request. 
"infos": [""] } ``` diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 49ab6c50ff..04d844bef9 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -1390,10 +1390,10 @@ clear eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"})) {} 1 -eval instant at 50m absent(max(nonexistant)) +eval instant at 50m absent(max(nonexistent)) {} 1 -eval instant at 50m absent(nonexistant > 1) +eval instant at 50m absent(nonexistent > 1) {} 1 eval instant at 50m absent(a + b) @@ -1402,7 +1402,7 @@ eval instant at 50m absent(a + b) eval instant at 50m absent(a and b) {} 1 -eval instant at 50m absent(rate(nonexistant[5m])) +eval instant at 50m absent(rate(nonexistent[5m])) {} 1 clear @@ -1420,7 +1420,7 @@ eval instant at 1m absent_over_time(http_requests_total{handler!="/foo"}[5m]) eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) {} 1 -eval instant at 1m absent_over_time(rate(nonexistant[5m])[5m:]) +eval instant at 1m absent_over_time(rate(nonexistent[5m])[5m:]) {} 1 eval instant at 1m absent_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) @@ -1497,7 +1497,7 @@ eval instant at 1m present_over_time(http_requests_total{handler!="/foo"}[5m]) eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", handler="/foobar"}[5m]) -eval instant at 1m present_over_time(rate(nonexistant[5m])[5m:]) +eval instant at 1m present_over_time(rate(nonexistent[5m])[5m:]) eval instant at 1m present_over_time(http_requests_total{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m]) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 073ffed2e5..21e3693e50 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -513,7 +513,7 @@ func (h *writeHandler) appendV2(app 
storage.Appender, req *writev2.Request, rs * } // handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. -// It doens't return errors in case of out of order CT. +// It doesn't return errors in case of out of order CT. func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { var err error if hist.IsFloatHistogram() { From 2ef8706c275bf06dcf482605abcd4def9e2d3af5 Mon Sep 17 00:00:00 2001 From: Giuliano Panzironi <45801466+giulianopz@users.noreply.github.com> Date: Mon, 3 Mar 2025 10:03:45 +0100 Subject: [PATCH 1397/1397] fix: format float to avoid overflow (#16083) Signed-off-by: giulianopz --- promql/parser/printer.go | 2 +- promql/parser/printer_test.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/promql/parser/printer.go b/promql/parser/printer.go index afe755e7dd..6f234a0290 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -232,7 +232,7 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string { } func (node *NumberLiteral) String() string { - return fmt.Sprint(node.Val) + return strconv.FormatFloat(node.Val, 'f', -1, 64) } func (node *ParenExpr) String() string { diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index 0a557ad597..ce06d6ec4b 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -164,6 +164,10 @@ func TestExprString(t *testing.T) { in: "{``=\"0\"}", out: `{""="0"}`, }, + { + in: "1048576", + out: "1048576", + }, } model.NameValidationScheme = model.UTF8Validation