diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 3f557a0895..e7e813e3b6 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: dessant/lock-threads@be8aa5be94131386884a6da4189effda9b14aa21 # v4.0.1 + - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1 with: process-only: 'issues' issue-inactive-days: '180' diff --git a/.golangci.yml b/.golangci.yml index 4df572c198..166b2e0d48 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,6 +23,7 @@ linters: - nolintlint - predeclared - revive + - testifylint - unconvert - unused @@ -35,13 +36,9 @@ issues: - path: _test.go linters: - errcheck - - path: tsdb/ + - path: "tsdb/head_wal.go" linters: - errorlint - - path: tsdb/ - text: "import 'github.com/pkg/errors' is not allowed" - linters: - - depguard - linters: - godot source: "^// ===" @@ -81,8 +78,55 @@ linters-settings: gofumpt: extra-rules: true revive: + # By default, revive will enable only the linting rules that are named in the configuration file. + # So, it's needed to explicitly set in configuration all required rules. + # The following configuration enables all the rules from the defaults.toml + # https://github.com/mgechev/revive/blob/master/defaults.toml rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter - - name: unused-parameter - severity: warning + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + - name: blank-imports + - name: context-as-argument + arguments: + # allow functions with test or bench signatures + - allowTypesBefore: "*testing.T,testing.TB" + - name: context-keys-type + - name: dot-imports + # A lot of false positives: incorrectly identifies channel draining as "empty code block". + # See https://github.com/mgechev/revive/issues/386 + - name: empty-block disabled: true + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: increment-decrement + - name: indent-error-flow + - name: package-comments + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + - name: time-naming + - name: unexported-return + - name: unreachable-code + - name: unused-parameter + disabled: true + - name: var-declaration + - name: var-naming + testifylint: + disable: + - float-compare + - go-require + enable: + - bool-compare + - compares + - empty + - error-is-as + - error-nil + - expected-actual + - len + - require-error + - suite-dont-use-pkg + - suite-extra-assert-call + diff --git a/CHANGELOG.md b/CHANGELOG.md index 35f9f77682..71b8c97fe4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,21 +1,16 @@ # Changelog -## 2.48.0-rc.2 / 2023-11-02 +## unreleased -* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060 -* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055 -* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062 +* [ENHANCEMENT] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224 +* [BUGFIX] Agent: Participate in notify calls. #13223 -## 2.48.0-rc.1 / 2023-10-24 - -* [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. 
#13012 - -## 2.48.0-rc.0 / 2023-10-10 +## 2.48.0 / 2023-11-16 * [CHANGE] Remote-write: respect Retry-After header on 5xx errors. #12677 * [FEATURE] Alerting: Add AWS SigV4 authentication support for Alertmanager endpoints. #12774 * [FEATURE] Promtool: Add support for histograms in the TSDB dump command. #12775 -* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 +* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 #13012 * [FEATURE] Remote-write: Add Azure AD OAuth authentication support for remote write requests. #12572 * [ENHANCEMENT] Remote-write: Add a header to count retried remote write requests. #12729 * [ENHANCEMENT] TSDB: Improve query performance by re-using iterator when moving between series. #12757 @@ -31,6 +26,7 @@ * [ENHANCEMENT] Scraping: Save memory when scraping by delaying creation of buffer. #12953 * [ENHANCEMENT] Agent: Allow ingestion of out-of-order samples. #12897 * [ENHANCEMENT] Promtool: Improve support for native histograms in TSDB analyze command. #12869 +* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060 * [BUGFIX] SD: Ensure that discovery managers are properly canceled. #10569 * [BUGFIX] TSDB: Fix PostingsForMatchers race with creating new series. #12558 * [BUGFIX] TSDB: Fix handling of explicit counter reset header in histograms. #12772 @@ -40,6 +36,8 @@ * [BUGFIX] Promtool: Fix errors not being reported in check rules command. #12715 * [BUGFIX] TSDB: Avoid panics reported in logs when head initialization takes a long time. #12876 * [BUGFIX] TSDB: Ensure that WBL is repaired when possible. #12406 +* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055 +* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062 ## 2.47.1 / 2023-10-04 diff --git a/Makefile b/Makefile index 0dd8673af3..ab229f9311 100644 --- a/Makefile +++ b/Makefile @@ -93,9 +93,9 @@ endif # If we only want to only test go code we have to change the test target # which is called by all. ifeq ($(GO_ONLY),1) -test: common-test +test: common-test check-go-mod-version else -test: common-test ui-build-module ui-test ui-lint +test: common-test ui-build-module ui-test ui-lint check-go-mod-version endif .PHONY: npm_licenses @@ -138,3 +138,17 @@ bench_tsdb: $(PROMU) cli-documentation: $(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md $(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md + +.PHONY: check-go-mod-version +check-go-mod-version: + @echo ">> checking go.mod version matching" + @./scripts/check-go-mod-version.sh + +.PHONY: update-all-go-deps +update-all-go-deps: + @$(MAKE) update-go-deps + @echo ">> updating Go dependencies in ./documentation/examples/remote_storage/" + @cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get -d $$m; \ + done + @cd ./documentation/examples/remote_storage/ && $(GO) mod tidy diff --git a/Makefile.common b/Makefile.common index 1338c61a76..f8d26a53ee 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.54.2 +GOLANGCI_LINT_VERSION ?= v1.55.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. 
# windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/RELEASE.md b/RELEASE.md index c39cc824ba..6ab2f63899 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -53,7 +53,8 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | | v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) | | v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) | -| v2.49 | 2023-11-15 | **searching for volunteer** | +| v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) | +| v2.50 | 2024-01-16 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. @@ -108,14 +109,16 @@ This is also a good time to consider any experimental features and feature flags for promotion to stable or for deprecation or ultimately removal. Do any of these in pull requests, one per feature. +> NOTE: As a validation step check if all security alerts are closed after this step: https://github.com/prometheus/prometheus/security/dependabot. Sometimes it's ok +> if not critical and e.g. fix is not released yet (or it does not relate to +> upgrading) or when we are unaffected. + #### Manually updating Go dependencies This is usually only needed for `+incompatible` and `v0.0.0` non-semver updates. ```bash -make update-go-deps -git add go.mod go.sum -git commit -m "Update dependencies" +make update-all-go-deps ``` #### Manually updating React dependencies diff --git a/VERSION b/VERSION index 10429c7ad0..9a9feb0847 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.48.0-rc.2 +2.48.0 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 4112cd842b..a8bd2f2b33 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -206,9 +206,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "native-histograms": c.tsdb.EnableNativeHistograms = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. - config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols - config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols + config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols + config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "created-timestamp-zero-ingestion": + c.scrape.EnableCreatedTimestampZeroIngestion = true + // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. + config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols + config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols + level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. 
Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) case "": continue case "promql-at-modifier", "promql-negative-offset": @@ -614,14 +620,52 @@ func main() { discoveryManagerNotify discoveryManager ) + // Kubernetes client metrics are used by Kubernetes SD. + // They are registered here in the main function, because SD mechanisms + // can only register metrics specific to a SD instance. + // Kubernetes client metrics are the same for the whole process - + // they are not specific to an SD instance. + err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer) + if err != nil { + level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err) + os.Exit(1) + } if cfg.enableNewSDManager { - discovery.RegisterMetrics() - discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape")) - discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify")) + { + discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discovery.Name("scrape")) + if discMgr == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager scrape") + os.Exit(1) + } + discoveryManagerScrape = discMgr + } + + { + discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discovery.Name("notify")) + if discMgr == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager notify") + os.Exit(1) + } + discoveryManagerNotify = discMgr + } } else { - legacymanager.RegisterMetrics() - discoveryManagerScrape = legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), legacymanager.Name("scrape")) - discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), legacymanager.Name("notify")) + { + discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, legacymanager.Name("scrape")) + if discMgr == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager scrape") + os.Exit(1) + } + discoveryManagerScrape = discMgr + } + + { + discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, legacymanager.Name("notify")) + if discMgr == nil { + level.Error(logger).Log("msg", "failed to create a discovery manager notify") + os.Exit(1) + } + discoveryManagerNotify = discMgr + } } scrapeManager, err := scrape.NewManager( @@ -1127,6 +1171,7 @@ func main() { ) localStorage.Set(db, 0) + db.SetWriteNotified(remoteStorage) close(dbOpen) <-cancel return nil @@ -1448,6 +1493,10 @@ func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, return 0, tsdb.ErrNotReady } +func (n notReadyAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + func (n notReadyAppender) Commit() error { return tsdb.ErrNotReady } func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady } @@ -1586,7 +1635,6 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { RetentionDuration: 
int64(time.Duration(opts.RetentionDuration) / time.Millisecond), MaxBytes: int64(opts.MaxBytes), NoLockfile: opts.NoLockfile, - AllowOverlappingCompaction: true, WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, SamplesPerChunk: opts.SamplesPerChunk, diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index e4f831939f..f4fe3855c6 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -346,7 +346,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames continue } - require.Equal(t, 1, len(g.GetMetric())) + require.Len(t, g.GetMetric(), 1) if _, ok := res[m]; ok { t.Error("expected only one metric family for", m) t.FailNow() diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index f20f2a22c0..dd6b56672c 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -284,7 +284,7 @@ func (p *queryLogTest) run(t *testing.T) { if !p.enabledAtStart { p.query(t) - require.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name()))) + require.Empty(t, readQueryLog(t, queryLogFile.Name())) p.setQueryLog(t, queryLogFile.Name()) p.reloadConfig(t) } @@ -309,7 +309,7 @@ func (p *queryLogTest) run(t *testing.T) { p.query(t) ql = readQueryLog(t, queryLogFile.Name()) - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) qc = len(ql) p.setQueryLog(t, queryLogFile.Name()) @@ -320,7 +320,7 @@ func (p *queryLogTest) run(t *testing.T) { ql = readQueryLog(t, queryLogFile.Name()) if p.exactQueryCount() { - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) } else { require.Greater(t, len(ql), qc, "no queries logged") } @@ -340,7 +340,7 @@ func (p *queryLogTest) run(t *testing.T) { require.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name())) ql = readQueryLog(t, newFile.Name()) if p.exactQueryCount() { - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) } p.validateLastQuery(t, ql) qc = len(ql) @@ -351,7 +351,7 @@ func (p *queryLogTest) run(t *testing.T) { ql = readQueryLog(t, newFile.Name()) if p.exactQueryCount() { - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) } else { require.Greater(t, len(ql), qc, "no queries logged") } diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index f373ebd6e4..7d29690e47 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -61,7 +61,7 @@ func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMa func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, expectedBlockDuration int64, expectedSamples []backfillSample, expectedNumBlocks int) { blocks := db.Blocks() - require.Equal(t, expectedNumBlocks, len(blocks), "did not create correct number of blocks") + require.Len(t, blocks, expectedNumBlocks, "did not create correct number of blocks") for i, block := range blocks { require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 973795a86e..508b681b88 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -36,6 +36,7 @@ import ( "github.com/google/pprof/profile" "github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" 
"github.com/prometheus/client_golang/prometheus/testutil/promlint" config_util "github.com/prometheus/common/config" @@ -198,6 +199,7 @@ func main() { testCmd := app.Command("test", "Unit testing.") testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") + testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings() testRulesFiles := testRulesCmd.Arg( "test-rule-file", "The unit test file.", @@ -316,7 +318,7 @@ func main() { switch parsedCmd { case sdCheckCmd.FullCommand(): - os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort)) + os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer)) case checkConfigCmd.FullCommand(): os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) @@ -366,6 +368,7 @@ func main() { EnableAtModifier: true, EnableNegativeOffset: true, }, + *testRulesRun, *testRulesFiles...), ) @@ -411,7 +414,7 @@ func checkExperimental(f bool) { } } -var lintError = fmt.Errorf("lint error") +var errLint = fmt.Errorf("lint error") type lintConfig struct { all bool @@ -763,7 +766,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) - hasErrors = hasErrors || !errors.Is(e, lintError) + hasErrors = hasErrors || !errors.Is(e, errLint) } if hasErrors { return failed, hasErrors @@ -776,7 +779,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) { } failed = true for _, err := range errs { - hasErrors = hasErrors || !errors.Is(err, lintError) + hasErrors = hasErrors || !errors.Is(err, errLint) } } else { fmt.Printf(" SUCCESS: %d rules found\n", n) @@ -797,7 +800,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) - hasErrors = hasErrors || !errors.Is(e, lintError) + hasErrors = hasErrors || !errors.Is(e, errLint) } if hasErrors { continue @@ -810,7 +813,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) { } failed = true for _, err := range errs { - hasErrors = hasErrors || !errors.Is(err, lintError) + hasErrors = hasErrors || !errors.Is(err, errLint) } } else { fmt.Printf(" SUCCESS: %d rules found\n", n) @@ -837,7 +840,7 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []e }) } errMessage += "Might cause inconsistency while recording expressions" - return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)} + return 0, []error{fmt.Errorf("%w %s", errLint, errMessage)} } } diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 09c91f92a5..7306a3e648 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -56,7 +56,7 @@ func TestQueryRange(t *testing.T) { defer s.Close() urlObject, err := url.Parse(s.URL) - require.Equal(t, nil, err) + require.NoError(t, err) p := &promqlPrinter{} exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p) @@ -79,7 +79,7 @@ func TestQueryInstant(t *testing.T) { defer s.Close() urlObject, err := url.Parse(s.URL) - require.Equal(t, nil, err) + require.NoError(t, err) p := &promqlPrinter{} exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p) diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index 1c06982880..75aad67864 100644 
--- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -91,13 +91,13 @@ func TestBackfillRuleIntegration(t *testing.T) { for _, err := range errs { require.NoError(t, err) } - require.Equal(t, 3, len(ruleImporter.groups)) + require.Len(t, ruleImporter.groups, 3) group1 := ruleImporter.groups[path1+";group0"] require.NotNil(t, group1) const defaultInterval = 60 require.Equal(t, defaultInterval*time.Second, group1.Interval()) gRules := group1.Rules() - require.Equal(t, 1, len(gRules)) + require.Len(t, gRules, 1) require.Equal(t, "rule1", gRules[0].Name()) require.Equal(t, "ruleExpr", gRules[0].Query().String()) require.Equal(t, 1, gRules[0].Labels().Len()) @@ -106,7 +106,7 @@ func TestBackfillRuleIntegration(t *testing.T) { require.NotNil(t, group2) require.Equal(t, defaultInterval*time.Second, group2.Interval()) g2Rules := group2.Rules() - require.Equal(t, 2, len(g2Rules)) + require.Len(t, g2Rules, 2) require.Equal(t, "grp2_rule1", g2Rules[0].Name()) require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String()) require.Equal(t, 0, g2Rules[0].Labels().Len()) @@ -122,7 +122,7 @@ func TestBackfillRuleIntegration(t *testing.T) { require.NoError(t, err) blocks := db.Blocks() - require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks)) + require.Len(t, blocks, (i+1)*tt.expectedBlockCount) q, err := db.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index 7c5ae70365..155152e1ac 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" @@ -37,7 +38,7 @@ type sdCheckResult struct { } // CheckSD performs service discovery for the given job name and reports the results. -func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool) int { +func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int { logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) @@ -77,7 +78,7 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault defer cancel() for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs { - d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger}) + d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Registerer: registerer}) if err != nil { fmt.Fprintln(os.Stderr, "Could not create new discoverer", err) return failureExitCode diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index 82bc9947f7..2f4d3aba7d 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -35,7 +35,7 @@ func TestSDCheckResult(t *testing.T) { }} reg, err := relabel.NewRegexp("(.*)") - require.Nil(t, err) + require.NoError(t, err) scrapeConfig := &config.ScrapeConfig{ ScrapeInterval: model.Duration(1 * time.Minute), diff --git a/cmd/promtool/testdata/rules_run.yml b/cmd/promtool/testdata/rules_run.yml new file mode 100644 index 0000000000..1905d89fa4 --- /dev/null +++ b/cmd/promtool/testdata/rules_run.yml @@ -0,0 +1,30 @@ +rule_files: + - rules.yml + +evaluation_interval: 1m + +# Minimal test cases to check focus on a rule group. 
+tests: + - name: correct test + input_series: + - series: test + values: 1 + + promql_expr_test: + - expr: test + eval_time: 0 + exp_samples: + - value: 1 + labels: test + + - name: wrong test + input_series: + - series: test + values: 0 + + promql_expr_test: + - expr: test + eval_time: 0 + exp_samples: + - value: 1 + labels: test diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index d37e03e52b..a25a8596d4 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -26,6 +26,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/regexp" "github.com/prometheus/common/model" "gopkg.in/yaml.v2" @@ -39,11 +40,16 @@ import ( // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. -func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int { +func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files ...string) int { failed := false + var run *regexp.Regexp + if runStrings != nil { + run = regexp.MustCompile(strings.Join(runStrings, "|")) + } + for _, f := range files { - if errs := ruleUnitTest(f, queryOpts); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -61,7 +67,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int { return successExitCode } -func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error { +func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp) []error { fmt.Println("Unit Testing: ", filename) b, err := os.ReadFile(filename) @@ -96,6 +102,10 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error { // Testing. var errs []error for _, t := range unitTestInp.Tests { + if !matchesRun(t.TestGroupName, run) { + continue + } + if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } @@ -111,6 +121,14 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error { return nil } +func matchesRun(name string, run *regexp.Regexp) bool { + if run == nil { + return true + } + + return run.MatchString(name) +} + // unitTestFile holds the contents of a single unit test file. 
type unitTestFile struct { RuleFiles []string `yaml:"rule_files"` diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index c96883113a..fb4012e3c1 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -125,7 +125,60 @@ func TestRulesUnitTest(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := RulesUnitTest(tt.queryOpts, tt.args.files...); got != tt.want { + if got := RulesUnitTest(tt.queryOpts, nil, tt.args.files...); got != tt.want { + t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestRulesUnitTestRun(t *testing.T) { + type args struct { + run []string + files []string + } + tests := []struct { + name string + args args + queryOpts promql.LazyLoaderOpts + want int + }{ + { + name: "Test all without run arg", + args: args{ + run: nil, + files: []string{"./testdata/rules_run.yml"}, + }, + want: 1, + }, + { + name: "Test all with run arg", + args: args{ + run: []string{"correct", "wrong"}, + files: []string{"./testdata/rules_run.yml"}, + }, + want: 1, + }, + { + name: "Test correct", + args: args{ + run: []string{"correct"}, + files: []string{"./testdata/rules_run.yml"}, + }, + want: 0, + }, + { + name: "Test wrong", + args: args{ + run: []string{"wrong"}, + files: []string{"./testdata/rules_run.yml"}, + }, + want: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := RulesUnitTest(tt.queryOpts, tt.args.run, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) diff --git a/config/config.go b/config/config.go index b832ac9a17..ddcca84dc7 100644 --- a/config/config.go +++ b/config/config.go @@ -454,12 +454,19 @@ var ( OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } + // DefaultScrapeProtocols is the set of scrape protocols that will be proposed + // to scrape target, ordered by priority. DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, PrometheusText0_0_4, } - DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{ + + // DefaultProtoFirstScrapeProtocols is like DefaultScrapeProtocols, but it + // favors protobuf Prometheus exposition format. + // Used by default for certain feature-flags like + // "native-histograms" and "created-timestamp-zero-ingestion". 
+ DefaultProtoFirstScrapeProtocols = []ScrapeProtocol{ PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, diff --git a/config/config_test.go b/config/config_test.go index 408622cd5a..e614a44637 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -568,6 +568,7 @@ var expectedConf = &Config{ ServiceDiscoveryConfigs: discovery.Configs{ &xds.KumaSDConfig{ Server: "http://kuma-control-plane.kuma-system.svc:5676", + ClientID: "main-prometheus", HTTPClientConfig: config.DefaultHTTPClientConfig, RefreshInterval: model.Duration(15 * time.Second), FetchTimeout: model.Duration(2 * time.Minute), @@ -1457,8 +1458,8 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { got := &Config{} require.NoError(t, yaml.UnmarshalStrict(out, got)) - require.Equal(t, true, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit) - require.Equal(t, false, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) + require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit) + require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) } func TestLoadConfig(t *testing.T) { @@ -1475,9 +1476,9 @@ func TestLoadConfig(t *testing.T) { func TestScrapeIntervalLarger(t *testing.T) { c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - require.Equal(t, 1, len(c.ScrapeConfigs)) + require.Len(t, c.ScrapeConfigs, 1) for _, sc := range c.ScrapeConfigs { - require.Equal(t, true, sc.ScrapeInterval >= sc.ScrapeTimeout) + require.GreaterOrEqual(t, sc.ScrapeInterval, sc.ScrapeTimeout) } } @@ -1493,7 +1494,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Equal(t, 22, len(matches), "wrong number of secret matches found") + require.Len(t, matches, 22, "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } @@ -2063,7 +2064,7 @@ func TestAgentMode(t *testing.T) { c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) require.NoError(t, err) - require.Len(t, c.RemoteWriteConfigs, 0) + require.Empty(t, c.RemoteWriteConfigs) c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) require.NoError(t, err) @@ -2257,5 +2258,5 @@ func TestScrapeConfigDisableCompression(t *testing.T) { got := &Config{} require.NoError(t, yaml.UnmarshalStrict(out, got)) - require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression) + require.False(t, got.ScrapeConfigs[0].EnableCompression) } diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index e034eff431..b584301649 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -221,6 +221,7 @@ scrape_configs: kuma_sd_configs: - server: http://kuma-control-plane.kuma-system.svc:5676 + client_id: main-prometheus - job_name: service-marathon marathon_sd_configs: diff --git a/config/testdata/roundtrip.good.yml b/config/testdata/roundtrip.good.yml index f2634d257a..24ab7d2592 100644 --- a/config/testdata/roundtrip.good.yml +++ b/config/testdata/roundtrip.good.yml @@ -108,6 +108,7 @@ scrape_configs: kuma_sd_configs: - server: http://kuma-control-plane.kuma-system.svc:5676 + client_id: main-prometheus marathon_sd_configs: - servers: diff --git a/discovery/README.md b/discovery/README.md index 19b579b399..4c06608625 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -234,6 +234,11 @@ type Config interface { 
type DiscovererOptions struct { Logger log.Logger + + // A registerer for the Discoverer's metrics. + Registerer prometheus.Registerer + + HTTPClientOptions []config.HTTPClientOption } ``` diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 64c8fdce63..40e6e7cb79 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -30,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -101,7 +102,7 @@ func (*EC2SDConfig) Name() string { return "ec2" } // NewDiscoverer returns a Discoverer for the EC2 Config. func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewEC2Discovery(c, opts.Logger), nil + return NewEC2Discovery(c, opts.Logger, opts.Registerer), nil } // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config. @@ -147,7 +148,7 @@ type EC2Discovery struct { } // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. -func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { +func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, reg prometheus.Registerer) *EC2Discovery { if logger == nil { logger = log.NewNopLogger() } @@ -156,10 +157,13 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { cfg: conf, } d.Discovery = refresh.NewDiscovery( - logger, - "ec2", - time.Duration(d.cfg.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "ec2", + Interval: time.Duration(d.cfg.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d } diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index c0198d6a77..5382ea0159 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -29,6 +29,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/lightsail" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -84,7 +85,7 @@ func (*LightsailSDConfig) Name() string { return "lightsail" } // NewDiscoverer returns a Discoverer for the Lightsail Config. func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewLightsailDiscovery(c, opts.Logger), nil + return NewLightsailDiscovery(c, opts.Logger, opts.Registerer), nil } // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config. @@ -121,7 +122,7 @@ type LightsailDiscovery struct { } // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. 
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *LightsailDiscovery { +func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, reg prometheus.Registerer) *LightsailDiscovery { if logger == nil { logger = log.NewNopLogger() } @@ -129,10 +130,13 @@ func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *Lightsai cfg: conf, } d.Discovery = refresh.NewDiscovery( - logger, - "lightsail", - time.Duration(d.cfg.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "lightsail", + Interval: time.Duration(d.cfg.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d } diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index faccadcf85..fa198c320f 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -28,9 +28,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" cache "github.com/Code-Hex/go-generics-cache" "github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/go-kit/log" @@ -78,17 +79,6 @@ var ( AuthenticationMethod: authMethodOAuth, HTTPClientConfig: config_util.DefaultHTTPClientConfig, } - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_azure_failures_total", - Help: "Number of Azure service discovery refresh failures.", - }) - cacheHitCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_azure_cache_hit_total", - Help: "Number of cache hit during refresh.", - }) ) var environments = map[string]cloud.Configuration{ @@ -105,7 +95,7 @@ func CloudConfigurationFromName(name string) (cloud.Configuration, error) { name = strings.ToUpper(name) env, ok := environments[name] if !ok { - return env, fmt.Errorf("There is no cloud configuration matching the name %q", name) + return env, fmt.Errorf("there is no cloud configuration matching the name %q", name) } return env, nil @@ -113,8 +103,6 @@ func CloudConfigurationFromName(name string) (cloud.Configuration, error) { func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) - prometheus.MustRegister(cacheHitCount) } // SDConfig is the configuration for Azure based service discovery. @@ -137,7 +125,7 @@ func (*SDConfig) Name() string { return "azure" } // NewDiscoverer returns a Discoverer for the Config. 
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger), nil + return NewDiscovery(c, opts.Logger, opts.Registerer) } func validateAuthParam(param, name string) error { @@ -180,14 +168,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { type Discovery struct { *refresh.Discovery - logger log.Logger - cfg *SDConfig - port int - cache *cache.Cache[string, *armnetwork.Interface] + logger log.Logger + cfg *SDConfig + port int + cache *cache.Cache[string, *armnetwork.Interface] + failuresCount prometheus.Counter + cacheHitCount prometheus.Counter } // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. -func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(cfg *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -197,16 +187,30 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery { port: cfg.Port, logger: logger, cache: l, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_azure_failures_total", + Help: "Number of Azure service discovery refresh failures.", + }), + cacheHitCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_azure_cache_hit_total", + Help: "Number of cache hit during refresh.", + }), } d.Discovery = refresh.NewDiscovery( - logger, - "azure", - time.Duration(cfg.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "azure", + Interval: time.Duration(cfg.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + Metrics: []prometheus.Collector{d.failuresCount, d.cacheHitCount}, + }, ) - return d + return d, nil } // azureClient represents multiple Azure Resource Manager providers. @@ -304,6 +308,7 @@ type virtualMachine struct { Location string OsType string ScaleSet string + InstanceID string Tags map[string]*string NetworkInterfaces []string Size string @@ -328,14 +333,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { client, err := createAzureClient(*d.cfg) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("could not create Azure client: %w", err) } client.logger = d.logger machines, err := client.getVMs(ctx, d.cfg.ResourceGroup) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machines: %w", err) } @@ -344,14 +349,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { // Load the vms managed by scale sets. scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err) } for _, scaleSet := range scaleSets { scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err) } machines = append(machines, scaleSetVms...) 
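// --- illustrative sketch (not part of the patch) ---------------------------
// The Azure hunks above replace package-level metrics, which init() used to
// register via prometheus.MustRegister, with counters held on the Discovery
// struct and handed to refresh.NewDiscovery through Options.Metrics. A minimal
// discoverer following the same pattern could look like this; the package
// name, the "example" mechanism, and lookupTargets are hypothetical.
package example

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

type Discovery struct {
	*refresh.Discovery
	failuresCount prometheus.Counter
}

func NewDiscovery(interval time.Duration, logger log.Logger, reg prometheus.Registerer) *Discovery {
	d := &Discovery{
		failuresCount: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "prometheus_sd_example_failures_total",
			Help: "Number of example SD refresh failures.",
		}),
	}
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
			Logger:   logger,
			Mech:     "example",
			Interval: interval,
			RefreshF: d.refresh,
			Registry: reg,                                     // instance-scoped registerer from DiscovererOptions
			Metrics:  []prometheus.Collector{d.failuresCount}, // registered together with the refresh metrics
		},
	)
	return d
}

func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	tgs, err := lookupTargets(ctx)
	if err != nil {
		// Increment the per-instance counter instead of a global one.
		d.failuresCount.Inc()
		return nil, err
	}
	return tgs, nil
}

// lookupTargets stands in for the mechanism-specific API call.
func lookupTargets(ctx context.Context) ([]*targetgroup.Group, error) {
	return []*targetgroup.Group{{Source: "example"}}, ctx.Err()
}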
@@ -402,9 +407,13 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { var networkInterface *armnetwork.Interface if v, ok := d.getFromCache(nicID); ok { networkInterface = v - cacheHitCount.Add(1) + d.cacheHitCount.Add(1) } else { - networkInterface, err = client.getNetworkInterfaceByID(ctx, nicID) + if vm.ScaleSet == "" { + networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID) + } else { + networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID) + } if err != nil { if errors.Is(err, errorNotFound) { level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) @@ -461,7 +470,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { var tg targetgroup.Group for tgt := range ch { if tgt.err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err) } if tgt.labelSet != nil { @@ -622,6 +631,7 @@ func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName st Location: *(vm.Location), OsType: osType, ScaleSet: scaleSetName, + InstanceID: *(vm.InstanceID), Tags: tags, NetworkInterfaces: networkInterfaces, Size: size, @@ -630,21 +640,41 @@ func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName st var errorNotFound = errors.New("network interface does not exist") -// getNetworkInterfaceByID gets the network interface. +// getVMNetworkInterfaceByID gets the network interface. // If a 404 is returned from the Azure API, `errorNotFound` is returned. -func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) { +func (client *azureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) { r, err := newAzureResourceFromID(networkInterfaceID, client.logger) if err != nil { return nil, fmt.Errorf("could not parse network interface ID: %w", err) } - resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, nil) + resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, &armnetwork.InterfacesClientGetOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")}) if err != nil { var responseError *azcore.ResponseError if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound { return nil, errorNotFound } - return nil, fmt.Errorf("Failed to retrieve Interface %v with error: %w", networkInterfaceID, err) + return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err) + } + + return &resp.Interface, nil +} + +// getVMScaleSetVMNetworkInterfaceByID gets the network interface. +// If a 404 is returned from the Azure API, `errorNotFound` is returned. 
+func (client *azureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) { + r, err := newAzureResourceFromID(networkInterfaceID, client.logger) + if err != nil { + return nil, fmt.Errorf("could not parse network interface ID: %w", err) + } + + resp, err := client.nic.GetVirtualMachineScaleSetNetworkInterface(ctx, r.ResourceGroupName, scaleSetName, instanceID, r.Name, &armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")}) + if err != nil { + var responseError *azcore.ResponseError + if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound { + return nil, errorNotFound + } + return nil, fmt.Errorf("failed to retrieve Interface %v with error: %w", networkInterfaceID, err) } return &resp.Interface, nil diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 6c3ec236b3..4ff937e0bc 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -17,7 +17,7 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/stretchr/testify/require" "go.uber.org/goleak" ) @@ -142,6 +142,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" + instanceID := "123" location := "westeurope" computerName := "computer_name" networkProfile := armcompute.NetworkProfile{ @@ -166,6 +167,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { ID: &id, Name: &name, Type: &vmType, + InstanceID: &instanceID, Location: &location, Tags: nil, Properties: properties, @@ -182,6 +184,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { Tags: map[string]*string{}, NetworkInterfaces: []string{}, ScaleSet: scaleSet, + InstanceID: instanceID, Size: size, } @@ -197,6 +200,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { vmSize := armcompute.VirtualMachineSizeTypes(size) osType := armcompute.OperatingSystemTypesLinux vmType := "type" + instanceID := "123" location := "westeurope" computerName := "computer_name" tags := map[string]*string{ @@ -224,6 +228,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { ID: &id, Name: &name, Type: &vmType, + InstanceID: &instanceID, Location: &location, Tags: tags, Properties: properties, @@ -240,6 +245,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { Tags: tags, NetworkInterfaces: []string{}, ScaleSet: scaleSet, + InstanceID: instanceID, Size: size, } @@ -269,7 +275,7 @@ func TestNewAzureResourceFromID(t *testing.T) { }, } { actual, err := newAzureResourceFromID(tc.id, nil) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, tc.expected.Name, actual.Name) require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName) } diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index b4cb152297..50f171a78a 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -71,41 +71,18 @@ const ( namespace = "prometheus" ) -var ( - rpcFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_consul_rpc_failures_total", - Help: "The number of Consul RPC call failures.", - }) - rpcDuration = prometheus.NewSummaryVec( - 
prometheus.SummaryOpts{ - Namespace: namespace, - Name: "sd_consul_rpc_duration_seconds", - Help: "The duration of a Consul RPC call in seconds.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{"endpoint", "call"}, - ) - - // Initialize metric vectors. - servicesRPCDuration = rpcDuration.WithLabelValues("catalog", "services") - serviceRPCDuration = rpcDuration.WithLabelValues("catalog", "service") - - // DefaultSDConfig is the default Consul SD configuration. - DefaultSDConfig = SDConfig{ - TagSeparator: ",", - Scheme: "http", - Server: "localhost:8500", - AllowStale: true, - RefreshInterval: model.Duration(30 * time.Second), - HTTPClientConfig: config.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Consul SD configuration. +var DefaultSDConfig = SDConfig{ + TagSeparator: ",", + Scheme: "http", + Server: "localhost:8500", + AllowStale: true, + RefreshInterval: model.Duration(30 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(rpcFailuresCount, rpcDuration) } // SDConfig is the configuration for Consul service discovery. @@ -147,7 +124,7 @@ func (*SDConfig) Name() string { return "consul" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -184,22 +161,27 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // Discovery retrieves target information from a Consul server // and updates them via watches. type Discovery struct { - client *consul.Client - clientDatacenter string - clientNamespace string - clientPartition string - tagSeparator string - watchedServices []string // Set of services which will be discovered. - watchedTags []string // Tags used to filter instances of a service. - watchedNodeMeta map[string]string - allowStale bool - refreshInterval time.Duration - finalizer func() - logger log.Logger + client *consul.Client + clientDatacenter string + clientNamespace string + clientPartition string + tagSeparator string + watchedServices []string // Set of services which will be discovered. + watchedTags []string // Tags used to filter instances of a service. + watchedNodeMeta map[string]string + allowStale bool + refreshInterval time.Duration + finalizer func() + logger log.Logger + rpcFailuresCount prometheus.Counter + rpcDuration *prometheus.SummaryVec + servicesRPCDuration prometheus.Observer + serviceRPCDuration prometheus.Observer + metricRegisterer discovery.MetricRegisterer } // NewDiscovery returns a new Discovery for the given config. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -237,7 +219,35 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { clientPartition: conf.Partition, finalizer: wrapper.CloseIdleConnections, logger: logger, + rpcFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_consul_rpc_failures_total", + Help: "The number of Consul RPC call failures.", + }), + rpcDuration: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: namespace, + Name: "sd_consul_rpc_duration_seconds", + Help: "The duration of a Consul RPC call in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"endpoint", "call"}, + ), } + + cd.metricRegisterer = discovery.NewMetricRegisterer( + reg, + []prometheus.Collector{ + cd.rpcFailuresCount, + cd.rpcDuration, + }, + ) + + // Initialize metric vectors. + cd.servicesRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "services") + cd.serviceRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "service") + return cd, nil } @@ -293,7 +303,7 @@ func (d *Discovery) getDatacenter() error { info, err := d.client.Agent().Self() if err != nil { level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) - rpcFailuresCount.Inc() + d.rpcFailuresCount.Inc() return err } @@ -334,6 +344,13 @@ func (d *Discovery) initialize(ctx context.Context) { // Run implements the Discoverer interface. func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() + if d.finalizer != nil { defer d.finalizer() } @@ -382,7 +399,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. t0 := time.Now() srvs, meta, err := catalog.Services(opts.WithContext(ctx)) elapsed := time.Since(t0) - servicesRPCDuration.Observe(elapsed.Seconds()) + d.servicesRPCDuration.Observe(elapsed.Seconds()) // Check the context before in order to exit early. select { @@ -393,7 +410,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. if err != nil { level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) - rpcFailuresCount.Inc() + d.rpcFailuresCount.Inc() time.Sleep(retryInterval) return } @@ -449,13 +466,15 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. // consulService contains data belonging to the same service. type consulService struct { - name string - tags []string - labels model.LabelSet - discovery *Discovery - client *consul.Client - tagSeparator string - logger log.Logger + name string + tags []string + labels model.LabelSet + discovery *Discovery + client *consul.Client + tagSeparator string + logger log.Logger + rpcFailuresCount prometheus.Counter + serviceRPCDuration prometheus.Observer } // Start watching a service. 
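// --- illustrative sketch (not part of the patch) ---------------------------
// The Consul hunks above drop init-time prometheus.MustRegister in favour of a
// discovery.MetricRegisterer built in the constructor and driven from Run():
// metrics are registered when discovery starts and unregistered when it stops,
// so short-lived discoverers (such as promtool's SD check) no longer collide
// on the global registry. Reduced to its essentials, with a hypothetical
// exampleDiscovery:
package example

import (
	"context"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

type exampleDiscovery struct {
	logger           log.Logger
	rpcFailuresCount prometheus.Counter
	metricRegisterer discovery.MetricRegisterer
}

func newExampleDiscovery(logger log.Logger, reg prometheus.Registerer) *exampleDiscovery {
	d := &exampleDiscovery{
		logger: logger,
		rpcFailuresCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "prometheus",
			Name:      "sd_example_rpc_failures_total",
			Help:      "The number of example SD RPC call failures.",
		}),
	}
	// Collectors are created per instance and only wired to a registerer here;
	// nothing touches the registry until Run is called.
	d.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{d.rpcFailuresCount})
	return d
}

func (d *exampleDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	if err := d.metricRegisterer.RegisterMetrics(); err != nil {
		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
		return
	}
	defer d.metricRegisterer.UnregisterMetrics()

	// Discovery loop elided; a real implementation sends target-group updates
	// on ch until ctx is cancelled.
	<-ctx.Done()
}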
@@ -469,8 +488,10 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G serviceLabel: model.LabelValue(name), datacenterLabel: model.LabelValue(d.clientDatacenter), }, - tagSeparator: d.tagSeparator, - logger: d.logger, + tagSeparator: d.tagSeparator, + logger: d.logger, + rpcFailuresCount: d.rpcFailuresCount, + serviceRPCDuration: d.serviceRPCDuration, } go func() { @@ -508,7 +529,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr t0 := time.Now() serviceNodes, meta, err := health.ServiceMultipleTags(srv.name, srv.tags, false, opts.WithContext(ctx)) elapsed := time.Since(t0) - serviceRPCDuration.Observe(elapsed.Seconds()) + srv.serviceRPCDuration.Observe(elapsed.Seconds()) // Check the context before in order to exit early. select { @@ -520,7 +541,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr if err != nil { level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) - rpcFailuresCount.Inc() + srv.rpcFailuresCount.Inc() time.Sleep(retryInterval) return } diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index c929601638..97cb8fbc9d 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -39,7 +40,7 @@ func TestConfiguredService(t *testing.T) { conf := &SDConfig{ Services: []string{"configuredServiceName"}, } - consulDiscovery, err := NewDiscovery(conf, nil) + consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry()) if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -56,7 +57,7 @@ func TestConfiguredServiceWithTag(t *testing.T) { Services: []string{"configuredServiceName"}, ServiceTags: []string{"http"}, } - consulDiscovery, err := NewDiscovery(conf, nil) + consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry()) if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -151,7 +152,7 @@ func TestConfiguredServiceWithTags(t *testing.T) { } for _, tc := range cases { - consulDiscovery, err := NewDiscovery(tc.conf, nil) + consulDiscovery, err := NewDiscovery(tc.conf, nil, prometheus.NewRegistry()) if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -165,7 +166,7 @@ func TestConfiguredServiceWithTags(t *testing.T) { func TestNonConfiguredService(t *testing.T) { conf := &SDConfig{} - consulDiscovery, err := NewDiscovery(conf, nil) + consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry()) if err != nil { t.Errorf("Unexpected error when initializing discovery %v", err) } @@ -262,19 +263,19 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) { func newDiscovery(t *testing.T, config *SDConfig) *Discovery { logger := log.NewNopLogger() - d, err := NewDiscovery(config, logger) + d, err := NewDiscovery(config, logger, prometheus.NewRegistry()) require.NoError(t, err) return d } func checkOneTarget(t *testing.T, tg []*targetgroup.Group) { - require.Equal(t, 1, len(tg)) + require.Len(t, tg, 1) target := tg[0] require.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"])) require.Equal(t, target.Source, string(target.Labels["__meta_consul_service"])) if target.Source == "test" { // test 
service should have one node. - require.Greater(t, len(target.Targets), 0, "Test service should have one node") + require.NotEmpty(t, target.Targets, "Test service should have one node") } } @@ -313,7 +314,7 @@ func TestNoTargets(t *testing.T) { }() targets := (<-ch)[0].Targets - require.Equal(t, 0, len(targets)) + require.Empty(t, targets) cancel() <-ch } @@ -484,7 +485,7 @@ oauth2: return } - require.Equal(t, config, test.expected) + require.Equal(t, test.expected, config) }) } } diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index e207388b3d..970258de04 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -24,6 +24,7 @@ import ( "github.com/digitalocean/godo" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -75,7 +76,7 @@ func (*SDConfig) Name() string { return "digitalocean" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -103,7 +104,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { d := &Discovery{ port: conf.Port, } @@ -125,10 +126,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "digitalocean", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "digitalocean", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go index a5da4b26e3..a959b312c1 100644 --- a/discovery/digitalocean/digitalocean_test.go +++ b/discovery/digitalocean/digitalocean_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) @@ -46,7 +47,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) { cfg := DefaultSDConfig cfg.HTTPClientConfig.BearerToken = tokenID - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) @@ -56,12 +57,12 @@ func TestDigitalOceanSDRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/discovery.go b/discovery/discovery.go index 9dc010a09a..acc4c1efe9 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -18,6 +18,7 @@ import ( "reflect" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" 
"github.com/prometheus/prometheus/discovery/targetgroup" @@ -42,6 +43,15 @@ type Discoverer interface { type DiscovererOptions struct { Logger log.Logger + // A registerer for the Discoverer's metrics. + // Some Discoverers may ignore this registerer and use the global one instead. + // For now this will work, because the Prometheus `main` function uses the global registry. + // However, in the future the Prometheus `main` function will be updated to not use the global registry. + // Hence, if a discoverer wants its metrics to be visible via the Prometheus executable's + // `/metrics` endpoint, it should use this explicit registerer. + // TODO(ptodev): Update this comment once the Prometheus `main` function does not use the global registry. + Registerer prometheus.Registerer + // Extra HTTP client options to expose to Discoverers. This field may be // ignored; Discoverer implementations must opt-in to reading it. HTTPClientOptions []config.HTTPClientOption diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 96e07254f0..9b6bd6741e 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -42,35 +42,21 @@ const ( dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port" dnsMxRecordPrefix = model.MetaLabelPrefix + "dns_mx_record_" dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target" + dnsNsRecordPrefix = model.MetaLabelPrefix + "dns_ns_record_" + dnsNsRecordTargetLabel = dnsNsRecordPrefix + "target" // Constants for instrumentation. namespace = "prometheus" ) -var ( - dnsSDLookupsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_dns_lookups_total", - Help: "The number of DNS-SD lookups.", - }) - dnsSDLookupFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_dns_lookup_failures_total", - Help: "The number of DNS-SD lookup failures.", - }) - - // DefaultSDConfig is the default DNS SD configuration. - DefaultSDConfig = SDConfig{ - RefreshInterval: model.Duration(30 * time.Second), - Type: "SRV", - } -) +// DefaultSDConfig is the default DNS SD configuration. +var DefaultSDConfig = SDConfig{ + RefreshInterval: model.Duration(30 * time.Second), + Type: "SRV", +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(dnsSDLookupFailuresCount, dnsSDLookupsCount) } // SDConfig is the configuration for DNS based service discovery. @@ -86,7 +72,7 @@ func (*SDConfig) Name() string { return "dns" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger), nil + return NewDiscovery(*c, opts.Logger, opts.Registerer) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -102,7 +88,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } switch strings.ToUpper(c.Type) { case "SRV": - case "A", "AAAA", "MX": + case "A", "AAAA", "MX", "NS": if c.Port == 0 { return errors.New("a port is required in DNS-SD configs for all record types except SRV") } @@ -116,16 +102,18 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. 
type Discovery struct { *refresh.Discovery - names []string - port int - qtype uint16 - logger log.Logger + names []string + port int + qtype uint16 + logger log.Logger + dnsSDLookupsCount prometheus.Counter + dnsSDLookupFailuresCount prometheus.Counter lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -140,6 +128,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { qtype = dns.TypeSRV case "MX": qtype = dns.TypeMX + case "NS": + qtype = dns.TypeNS } d := &Discovery{ names: conf.Names, @@ -147,14 +137,32 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { port: conf.Port, logger: logger, lookupFn: lookupWithSearchPath, + dnsSDLookupsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookups_total", + Help: "The number of DNS-SD lookups.", + }), + dnsSDLookupFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_dns_lookup_failures_total", + Help: "The number of DNS-SD lookup failures.", + }), } + d.Discovery = refresh.NewDiscovery( - logger, - "dns", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "dns", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: prometheus.NewRegistry(), + Metrics: []prometheus.Collector{d.dnsSDLookupsCount, d.dnsSDLookupFailuresCount}, + }, ) - return d + + return d, nil } func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { @@ -187,9 +195,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error { response, err := d.lookupFn(name, d.qtype, d.logger) - dnsSDLookupsCount.Inc() + d.dnsSDLookupsCount.Inc() if err != nil { - dnsSDLookupFailuresCount.Inc() + d.dnsSDLookupFailuresCount.Inc() return err } @@ -199,7 +207,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ } for _, record := range response.Answer { - var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue + var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget, dnsNsRecordTarget model.LabelValue switch addr := record.(type) { case *dns.SRV: @@ -217,6 +225,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ addr.Mx = strings.TrimRight(addr.Mx, ".") target = hostPort(addr.Mx, d.port) + case *dns.NS: + dnsNsRecordTarget = model.LabelValue(addr.Ns) + + // Remove the final dot from rooted DNS names to make them look more usual. 
+ addr.Ns = strings.TrimRight(addr.Ns, ".") + + target = hostPort(addr.Ns, d.port) case *dns.A: target = hostPort(addr.A.String(), d.port) case *dns.AAAA: @@ -234,6 +249,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ dnsSrvRecordTargetLabel: dnsSrvRecordTarget, dnsSrvRecordPortLabel: dnsSrvRecordPort, dnsMxRecordTargetLabel: dnsMxRecordTarget, + dnsNsRecordTargetLabel: dnsNsRecordTarget, }) } diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 50b2860496..b8dd2efaac 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/miekg/dns" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" @@ -81,6 +82,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -112,6 +114,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -143,6 +146,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "db1.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, { "__address__": "db2.example.com:3306", @@ -150,6 +154,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "db2.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -180,6 +185,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "db1.example.com.", "__meta_dns_srv_record_port": "3306", "__meta_dns_mx_record_target": "", + "__meta_dns_ns_record_target": "", }, }, }, @@ -227,6 +233,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "smtp1.example.com.", + "__meta_dns_ns_record_target": "", }, { "__address__": "smtp2.example.com:25", @@ -234,6 +241,7 @@ func TestDNS(t *testing.T) { "__meta_dns_srv_record_target": "", "__meta_dns_srv_record_port": "", "__meta_dns_mx_record_target": "smtp2.example.com.", + "__meta_dns_ns_record_target": "", }, }, }, @@ -245,7 +253,8 @@ func TestDNS(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - sd := NewDiscovery(tc.config, nil) + sd, err := NewDiscovery(tc.config, nil, prometheus.NewRegistry()) + require.NoError(t, err) sd.lookupFn = tc.lookup tgs, err := sd.refresh(context.Background()) diff --git a/discovery/eureka/client_test.go b/discovery/eureka/client_test.go index f1451c3a9e..83f6fd5ff1 100644 --- a/discovery/eureka/client_test.go +++ b/discovery/eureka/client_test.go @@ -184,17 +184,17 @@ func TestFetchApps(t *testing.T) { apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{}) require.NoError(t, err) - require.Equal(t, len(apps.Applications), 2) - require.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE") - require.Equal(t, apps.Applications[1].Name, "META-SERVICE") + require.Len(t, apps.Applications, 2) + require.Equal(t, "CONFIG-SERVICE", apps.Applications[0].Name) + require.Equal(t, "META-SERVICE", apps.Applications[1].Name) - require.Equal(t, len(apps.Applications[1].Instances), 2) - require.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080") - require.Equal(t, 
apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, "management.port") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090") - require.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080") + require.Len(t, apps.Applications[1].Instances, 2) + require.Equal(t, "meta-service002.test.com:meta-service:8080", apps.Applications[1].Instances[0].InstanceID) + require.Equal(t, "project", apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local) + require.Equal(t, "meta-service", apps.Applications[1].Instances[0].Metadata.Items[0].Content) + require.Equal(t, "management.port", apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local) + require.Equal(t, "8090", apps.Applications[1].Instances[0].Metadata.Items[1].Content) + require.Equal(t, "meta-service001.test.com:meta-service:8080", apps.Applications[1].Instances[1].InstanceID) } func Test500ErrorHttpResponse(t *testing.T) { diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 5d9d8d552d..d3e4084e56 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -80,7 +81,7 @@ func (*SDConfig) Name() string { return "eureka" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -117,7 +118,7 @@ type Discovery struct { } // NewDiscovery creates a new Eureka discovery for the given role. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd") if err != nil { return nil, err @@ -128,10 +129,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { server: conf.Server, } d.Discovery = refresh.NewDiscovery( - logger, - "eureka", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "eureka", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/eureka/eureka_test.go b/discovery/eureka/eureka_test.go index 0641aa7bf2..1fe3c710e1 100644 --- a/discovery/eureka/eureka_test.go +++ b/discovery/eureka/eureka_test.go @@ -20,6 +20,7 @@ import ( "net/http/httptest" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -35,7 +36,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err Server: ts.URL, } - md, err := NewDiscovery(&conf, nil) + md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry()) if err != nil { return nil, err } @@ -55,7 +56,7 @@ func TestEurekaSDHandleError(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestEurekaSDEmptyList(t *testing.T) { @@ -72,7 +73,7 @@ func TestEurekaSDEmptyList(t *testing.T) { ) tgs, err := testUpdateServices(respHandler) require.NoError(t, err) - require.Equal(t, len(tgs), 1) + require.Len(t, tgs, 1) } func TestEurekaSDSendGroup(t *testing.T) { @@ -232,11 +233,11 @@ func TestEurekaSDSendGroup(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.NoError(t, err) - require.Equal(t, len(tgs), 1) + require.Len(t, tgs, 1) tg := tgs[0] - require.Equal(t, tg.Source, "eureka") - require.Equal(t, len(tg.Targets), 4) + require.Equal(t, "eureka", tg.Source) + require.Len(t, tg.Targets, 4) tgt := tg.Targets[0] require.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080")) diff --git a/discovery/file/file.go b/discovery/file/file.go index 60b63350f5..ef6ed1f5ee 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -39,24 +39,6 @@ import ( ) var ( - fileSDReadErrorsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", - }) - fileSDScanDuration = prometheus.NewSummary( - prometheus.SummaryOpts{ - Name: "prometheus_sd_file_scan_duration_seconds", - Help: "The duration of the File-SD scan in seconds.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }) - fileSDTimeStamp = NewTimestampCollector() - fileWatcherErrorsCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_file_watcher_errors_total", - Help: "The number of File-SD errors caused by filesystem watch failures.", - }) - patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) // DefaultSDConfig is the default file SD configuration. @@ -67,7 +49,6 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. 
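DigitalOcean and Eureka above, and GCE, Hetzner, HTTP and IONOS below, all make the same mechanical change to refresh.NewDiscovery: the four positional arguments become a refresh.Options struct that additionally carries the registerer and any extra per-SD collectors. A hypothetical SD showing the new call shape; only the Options field names come from the hunks in this diff, everything else is assumed:

package example

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

type Discovery struct {
	*refresh.Discovery
	failuresCount prometheus.Counter // per-SD collector, no longer a package global
}

func NewDiscovery(logger log.Logger, reg prometheus.Registerer) *Discovery {
	d := &Discovery{
		failuresCount: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "example_sd_failures_total", // placeholder name
			Help: "Number of example SD refresh failures.",
		}),
	}
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
			Logger:   logger,
			Mech:     "example",
			Interval: 60 * time.Second,
			RefreshF: d.refresh,
			Registry: reg,                                      // registerer handed down from DiscovererOptions
			Metrics:  []prometheus.Collector{d.failuresCount},  // extra collectors to register alongside
		},
	)
	return d
}

func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	// A real SD would query its backend here.
	return []*targetgroup.Group{{Source: "example"}}, nil
}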
@@ -81,7 +62,7 @@ func (*SDConfig) Name() string { return "file" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger), nil + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -187,10 +168,17 @@ type Discovery struct { // This is used to detect deleted target groups. lastRefresh map[string]int logger log.Logger + + fileSDReadErrorsCount prometheus.Counter + fileSDScanDuration prometheus.Summary + fileWatcherErrorsCount prometheus.Counter + fileSDTimeStamp *TimestampCollector + + metricRegisterer discovery.MetricRegisterer } // NewDiscovery returns a new file discovery for the given paths. -func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -200,9 +188,35 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { interval: time.Duration(conf.RefreshInterval), timestamps: make(map[string]float64), logger: logger, + fileSDReadErrorsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }), + fileSDScanDuration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_file_scan_duration_seconds", + Help: "The duration of the File-SD scan in seconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }), + fileWatcherErrorsCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_watcher_errors_total", + Help: "The number of File-SD errors caused by filesystem watch failures.", + }), + fileSDTimeStamp: NewTimestampCollector(), } - fileSDTimeStamp.addDiscoverer(disc) - return disc + + disc.fileSDTimeStamp.addDiscoverer(disc) + + disc.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{ + disc.fileSDReadErrorsCount, + disc.fileSDScanDuration, + disc.fileWatcherErrorsCount, + disc.fileSDTimeStamp, + }) + + return disc, nil } // listFiles returns a list of all files that match the configured patterns. @@ -239,10 +253,17 @@ func (d *Discovery) watchFiles() { // Run implements the Discoverer interface. func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() + watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) - fileWatcherErrorsCount.Inc() + d.fileWatcherErrorsCount.Inc() return } d.watcher = watcher @@ -306,7 +327,7 @@ func (d *Discovery) stop() { done := make(chan struct{}) defer close(done) - fileSDTimeStamp.removeDiscoverer(d) + d.fileSDTimeStamp.removeDiscoverer(d) // Closing the watcher will deadlock unless all events and errors are drained. 
go func() { @@ -332,13 +353,13 @@ func (d *Discovery) stop() { func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) { t0 := time.Now() defer func() { - fileSDScanDuration.Observe(time.Since(t0).Seconds()) + d.fileSDScanDuration.Observe(time.Since(t0).Seconds()) }() ref := map[string]int{} for _, p := range d.listFiles() { tgroups, err := d.readFile(p) if err != nil { - fileSDReadErrorsCount.Inc() + d.fileSDReadErrorsCount.Inc() level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) // Prevent deletion down below. diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go index 76e1cebed9..c138fc8a95 100644 --- a/discovery/file/file_test.go +++ b/discovery/file/file_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" @@ -143,7 +144,7 @@ func (t *testRunner) run(files ...string) { ctx, cancel := context.WithCancel(context.Background()) t.cancelSD = cancel go func() { - NewDiscovery( + d, err := NewDiscovery( &SDConfig{ Files: files, // Setting a high refresh interval to make sure that the tests only @@ -151,7 +152,11 @@ func (t *testRunner) run(files ...string) { RefreshInterval: model.Duration(1 * time.Hour), }, nil, - ).Run(ctx, t.ch) + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + d.Run(ctx, t.ch) }() } diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index fa05fbbf38..21a95ee39e 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" @@ -86,7 +87,7 @@ func (*SDConfig) Name() string { return "gce" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger) + return NewDiscovery(*c, opts.Logger, opts.Registerer) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -121,7 +122,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { d := &Discovery{ project: conf.Project, zone: conf.Zone, @@ -141,10 +142,13 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { d.isvc = compute.NewInstancesService(d.svc) d.Discovery = refresh.NewDiscovery( - logger, - "gce", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "gce", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go index a4f19cfddd..10b799037a 100644 --- a/discovery/hetzner/hcloud_test.go +++ b/discovery/hetzner/hcloud_test.go @@ -48,12 +48,12 @@ func TestHCloudSDRefresh(t *testing.T) { targetGroups, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup, "targetGroup should not be nil") require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") - require.Equal(t, 3, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 3) for i, labelSet := range []model.LabelSet{ { diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index c3f7ec39c3..9d3e6aa65d 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log" "github.com/hetznercloud/hcloud-go/v2/hcloud" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -67,7 +68,7 @@ func (*SDConfig) Name() string { return "hetzner" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } type refresher interface { @@ -127,17 +128,20 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) { r, err := newRefresher(conf, logger) if err != nil { return nil, err } return refresh.NewDiscovery( - logger, - "hetzner", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: logger, + Mech: "hetzner", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + Registry: reg, + }, ), nil } diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index f78a0bbda1..abee5fea90 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -47,12 +47,12 @@ func TestRobotSDRefresh(t *testing.T) { targetGroups, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup, "targetGroup should not be nil") require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") - require.Equal(t, 2, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 2) for i, labelSet := range []model.LabelSet{ { @@ -98,5 +98,5 @@ func TestRobotSDRefreshHandleError(t *testing.T) { require.Error(t, err) require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) - require.Equal(t, 0, len(targetGroups)) + require.Empty(t, targetGroups) } diff --git a/discovery/http/http.go b/discovery/http/http.go index 2980d7efda..c12fdb26d2 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -45,17 +45,10 @@ var ( } userAgent = fmt.Sprintf("Prometheus/%s", version.Version) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_http_failures_total", - Help: "Number of HTTP service discovery refresh failures.", - }) ) func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for HTTP based discovery. @@ -70,7 +63,7 @@ func (*SDConfig) Name() string { return "http" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions) + return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -112,10 +105,11 @@ type Discovery struct { client *http.Client refreshInterval time.Duration tgLastLength int + failuresCount prometheus.Counter } // NewDiscovery returns a new HTTP discovery for the given config. -func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -130,13 +124,22 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPCli url: conf.URL, client: client, refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers. 
+ failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_http_failures_total", + Help: "Number of HTTP service discovery refresh failures.", + }), } d.Discovery = refresh.NewDiscovery( - logger, - "http", - time.Duration(conf.RefreshInterval), - d.Refresh, + refresh.Options{ + Logger: logger, + Mech: "http", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.Refresh, + Registry: reg, + Metrics: []prometheus.Collector{d.failuresCount}, + }, ) return d, nil } @@ -152,7 +155,7 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) { resp, err := d.client.Do(req.WithContext(ctx)) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, err } defer func() { @@ -161,31 +164,31 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) { }() if resp.StatusCode != http.StatusOK { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("server returned HTTP status %s", resp.Status) } if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("unsupported content type %q", resp.Header.Get("Content-Type")) } b, err := io.ReadAll(resp.Body) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, err } var targetGroups []*targetgroup.Group if err := json.Unmarshal(b, &targetGroups); err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, err } for i, tg := range targetGroups { if tg == nil { - failuresCount.Inc() + d.failuresCount.Inc() err = errors.New("nil target group item found") return nil, err } diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index a284e7f361..164719e900 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -41,7 +41,7 @@ func TestHTTPValidRefresh(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() @@ -62,8 +62,8 @@ func TestHTTPValidRefresh(t *testing.T) { Source: urlSource(ts.URL+"/http_sd.good.json", 0), }, } - require.Equal(t, tgs, expectedTargets) - require.Equal(t, 0.0, getFailureCount()) + require.Equal(t, expectedTargets, tgs) + require.Equal(t, 0.0, getFailureCount(d.failuresCount)) } func TestHTTPInvalidCode(t *testing.T) { @@ -79,13 +79,13 @@ func TestHTTPInvalidCode(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() _, err = d.Refresh(ctx) require.EqualError(t, err, "server returned HTTP status 400 Bad Request") - require.Equal(t, 1.0, getFailureCount()) + require.Equal(t, 1.0, getFailureCount(d.failuresCount)) } func TestHTTPInvalidFormat(t *testing.T) { @@ -101,18 +101,16 @@ func TestHTTPInvalidFormat(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() _, err = d.Refresh(ctx) require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`) - require.Equal(t, 1.0, getFailureCount()) + require.Equal(t, 1.0, getFailureCount(d.failuresCount)) } 
-var lastFailureCount float64 - -func getFailureCount() float64 { +func getFailureCount(failuresCount prometheus.Counter) float64 { failureChan := make(chan prometheus.Metric) go func() { @@ -129,10 +127,7 @@ func getFailureCount() float64 { metric.Write(&counter) } - // account for failures in prior tests - count := *counter.Counter.Value - lastFailureCount - lastFailureCount = *counter.Counter.Value - return count + return *counter.Counter.Value } func TestContentTypeRegex(t *testing.T) { @@ -417,7 +412,7 @@ func TestSourceDisappeared(t *testing.T) { URL: ts.URL, RefreshInterval: model.Duration(1 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) require.NoError(t, err) for _, test := range cases { ctx := context.Background() diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index 3afed8d799..36623745ab 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -23,6 +23,8 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/refresh" + + "github.com/prometheus/client_golang/prometheus" ) const ( @@ -41,7 +43,7 @@ func init() { type Discovery struct{} // NewDiscovery returns a new refresh.Discovery for IONOS Cloud. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) { if conf.ionosEndpoint == "" { conf.ionosEndpoint = "https://api.ionos.com" } @@ -52,10 +54,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) } return refresh.NewDiscovery( - logger, - "ionos", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "ionos", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ), nil } @@ -86,7 +91,7 @@ func (c SDConfig) Name() string { // NewDiscoverer returns a new discovery.Discoverer for IONOS Cloud. func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(&c, options.Logger) + return NewDiscovery(&c, options.Logger, options.Registerer) } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
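One consequence of the http_test.go rewrite above: with a fresh registry per test, getFailureCount no longer needs the lastFailureCount bookkeeping that compensated for counter state leaking between tests, so the counter's absolute value is the answer. The same read could also be expressed with client_golang's testutil helper; a sketch of an equivalent, not what this diff uses:

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func getFailureCount(c prometheus.Counter) float64 {
	// ToFloat64 collects the single metric from c and returns its value.
	return testutil.ToFloat64(c)
}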
diff --git a/discovery/ionos/server_test.go b/discovery/ionos/server_test.go index 92f2a96f9d..30f358e325 100644 --- a/discovery/ionos/server_test.go +++ b/discovery/ionos/server_test.go @@ -48,12 +48,12 @@ func TestIONOSServerRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 708e229a2f..512d775523 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" @@ -30,12 +31,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - epAddCount = eventCount.WithLabelValues("endpoints", "add") - epUpdateCount = eventCount.WithLabelValues("endpoints", "update") - epDeleteCount = eventCount.WithLabelValues("endpoints", "delete") -) - // Endpoints discovers new endpoint targets. type Endpoints struct { logger log.Logger @@ -54,10 +49,19 @@ type Endpoints struct { } // NewEndpoints returns a new endpoints discovery. -func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *Endpoints { +func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { l = log.NewNopLogger() } + + epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) + epUpdateCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleUpdate) + epDeleteCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleDelete) + + svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) + svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) + svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) + e := &Endpoints{ logger: l, endpointsInf: eps, @@ -68,7 +72,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca podStore: pod.GetStore(), nodeInf: node, withNodeMetadata: node != nil, - queue: workqueue.NewNamed("endpoints"), + queue: workqueue.NewNamed(RoleEndpoint.String()), } _, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index a168623804..21095df4af 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" @@ -33,12 +34,6 @@ import ( "github.com/prometheus/prometheus/util/strutil" ) -var ( - epslAddCount = eventCount.WithLabelValues("endpointslice", "add") - epslUpdateCount = eventCount.WithLabelValues("endpointslice", "update") - epslDeleteCount = eventCount.WithLabelValues("endpointslice", "delete") -) - // EndpointSlice discovers new endpoint targets. 
type EndpointSlice struct { logger log.Logger @@ -57,10 +52,19 @@ type EndpointSlice struct { } // NewEndpointSlice returns a new endpointslice discovery. -func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *EndpointSlice { +func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { l = log.NewNopLogger() } + + epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) + epslUpdateCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleUpdate) + epslDeleteCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleDelete) + + svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) + svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) + svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) + e := &EndpointSlice{ logger: l, endpointSliceInf: eps, @@ -71,7 +75,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod podStore: pod.GetStore(), nodeInf: node, withNodeMetadata: node != nil, - queue: workqueue.NewNamed("endpointSlice"), + queue: workqueue.NewNamed(RoleEndpointSlice.String()), } _, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index e564910936..1ee3337193 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -29,7 +29,7 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) require.Equal(t, endpointSlice.AddressType, v1.AddressType(adaptor.addressType())) require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, endpointSlice.Labels[v1.LabelServiceName], "testendpoints") + require.Equal(t, "testendpoints", endpointSlice.Labels[v1.LabelServiceName]) for i, endpointAdaptor := range adaptor.endpoints() { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) @@ -57,7 +57,7 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType())) require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, endpointSlice.Labels[v1beta1.LabelServiceName], "testendpoints") + require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName]) for i, endpointAdaptor := range adaptor.endpoints() { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index fee4cc7207..7b6366b257 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" "k8s.io/api/networking/v1beta1" @@ -30,12 +31,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - ingressAddCount = eventCount.WithLabelValues("ingress", "add") - ingressUpdateCount = eventCount.WithLabelValues("ingress", "update") - 
ingressDeleteCount = eventCount.WithLabelValues("ingress", "delete") -) - // Ingress implements discovery of Kubernetes ingress. type Ingress struct { logger log.Logger @@ -45,8 +40,18 @@ type Ingress struct { } // NewIngress returns a new ingress discovery. -func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress { - s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")} +func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { + ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) + ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) + ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) + + s := &Ingress{ + logger: l, + informer: inf, + store: inf.GetStore(), + queue: workqueue.NewNamed(RoleIngress.String()), + } + _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { ingressAddCount.Inc() diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 7bd96652f9..5c5f3dfb62 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -58,24 +58,14 @@ import ( const ( // metaLabelPrefix is the meta prefix used for all meta labels. // in this discovery. - metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_" - namespaceLabel = metaLabelPrefix + "namespace" - metricsNamespace = "prometheus_sd_kubernetes" - presentValue = model.LabelValue("true") + metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_" + namespaceLabel = metaLabelPrefix + "namespace" + presentValue = model.LabelValue("true") ) var ( // Http header. userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // Custom events metric. - eventCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: metricsNamespace, - Name: "events_total", - Help: "The number of Kubernetes events handled.", - }, - []string{"role", "event"}, - ) // DefaultSDConfig is the default Kubernetes SD configuration. DefaultSDConfig = SDConfig{ HTTPClientConfig: config.DefaultHTTPClientConfig, @@ -84,15 +74,6 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(eventCount) - // Initialize metric vectors. - for _, role := range []string{"endpointslice", "endpoints", "node", "pod", "service", "ingress"} { - for _, evt := range []string{"add", "delete", "update"} { - eventCount.WithLabelValues(role, evt) - } - } - (&clientGoRequestMetricAdapter{}).Register(prometheus.DefaultRegisterer) - (&clientGoWorkqueueMetricsProvider{}).Register(prometheus.DefaultRegisterer) } // Role is role of the service in Kubernetes. @@ -121,6 +102,16 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { } } +func (c Role) String() string { + return string(c) +} + +const ( + MetricLabelRoleAdd = "add" + MetricLabelRoleDelete = "delete" + MetricLabelRoleUpdate = "update" +) + // SDConfig is the configuration for Kubernetes service discovery. type SDConfig struct { APIServer config.URL `yaml:"api_server,omitempty"` @@ -137,7 +128,7 @@ func (*SDConfig) Name() string { return "kubernetes" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return New(opts.Logger, c) + return New(opts.Logger, opts.Registerer, c) } // SetDirectory joins any relative file paths with dir. 
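The endpoint, endpoint-slice and ingress constructors above, and NewNode, NewPod and NewService below, now take the shared event-counter vector and pin their role-specific counters up front, replacing the deleted package-level eventCount globals. A self-contained sketch of the wiring, assuming discovery.KubernetesMetricsNamespace keeps the old "prometheus_sd_kubernetes" value removed from the constants above:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/kubernetes"
)

func main() {
	eventCount := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: discovery.KubernetesMetricsNamespace,
			Name:      "events_total",
			Help:      "The number of Kubernetes events handled.",
		},
		[]string{"role", "event"},
	)

	// Constructors curry their counters once so the event handlers do a plain Inc().
	add := eventCount.WithLabelValues(kubernetes.RoleIngress.String(), kubernetes.MetricLabelRoleAdd)
	add.Inc()
	fmt.Println(`incremented prometheus_sd_kubernetes_events_total{role="ingress",event="add"}`)
}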
@@ -274,6 +265,8 @@ type Discovery struct { selectors roleSelector ownNamespace string attachMetadata AttachMetadataConfig + eventCount *prometheus.CounterVec + metricRegisterer discovery.MetricRegisterer } func (d *Discovery) getNamespaces() []string { @@ -292,7 +285,7 @@ func (d *Discovery) getNamespaces() []string { } // New creates a new Kubernetes discovery for the given role. -func New(l log.Logger, conf *SDConfig) (*Discovery, error) { +func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, error) { if l == nil { l = log.NewNopLogger() } @@ -346,7 +339,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { return nil, err } - return &Discovery{ + d := &Discovery{ client: c, logger: l, role: conf.Role, @@ -355,7 +348,37 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { selectors: mapSelector(conf.Selectors), ownNamespace: ownNamespace, attachMetadata: conf.AttachMetadata, - }, nil + eventCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: discovery.KubernetesMetricsNamespace, + Name: "events_total", + Help: "The number of Kubernetes events handled.", + }, + []string{"role", "event"}, + ), + } + + d.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{d.eventCount}) + + // Initialize metric vectors. + for _, role := range []string{ + RoleEndpointSlice.String(), + RoleEndpoint.String(), + RoleNode.String(), + RolePod.String(), + RoleService.String(), + RoleIngress.String(), + } { + for _, evt := range []string{ + MetricLabelRoleAdd, + MetricLabelRoleDelete, + MetricLabelRoleUpdate, + } { + d.eventCount.WithLabelValues(role, evt) + } + } + + return d, nil } func mapSelector(rawSelector []SelectorConfig) roleSelector { @@ -391,6 +414,14 @@ const resyncDisabled = 0 // Run implements the discoverer interface. 
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { d.Lock() + + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() + namespaces := d.getNamespaces() switch d.role { @@ -482,6 +513,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, + d.eventCount, ) d.discoverers = append(d.discoverers, eps) go eps.endpointSliceInf.Run(ctx.Done()) @@ -541,6 +573,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, + d.eventCount, ) d.discoverers = append(d.discoverers, eps) go eps.endpointsInf.Run(ctx.Done()) @@ -572,6 +605,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { log.With(d.logger, "role", "pod"), d.newPodsByNodeInformer(plw), nodeInformer, + d.eventCount, ) d.discoverers = append(d.discoverers, pod) go pod.podInf.Run(ctx.Done()) @@ -594,6 +628,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { svc := NewService( log.With(d.logger, "role", "service"), cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + d.eventCount, ) d.discoverers = append(d.discoverers, svc) go svc.informer.Run(ctx.Done()) @@ -651,13 +686,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { ingress := NewIngress( log.With(d.logger, "role", "ingress"), informer, + d.eventCount, ) d.discoverers = append(d.discoverers, ingress) go ingress.informer.Run(ctx.Done()) } case RoleNode: nodeInformer := d.newNodeInformer(ctx) - node := NewNode(log.With(d.logger, "role", "node"), nodeInformer) + node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.eventCount) d.discoverers = append(d.discoverers, node) go node.informer.Run(ctx.Done()) default: diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index d0ed4c6ca1..71c937e944 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -29,6 +29,8 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/testutil" @@ -49,13 +51,25 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery) fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer} - return &Discovery{ + d := &Discovery{ client: clientset, logger: log.NewNopLogger(), role: role, namespaceDiscovery: &nsDiscovery, ownNamespace: "own-ns", - }, clientset + eventCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: discovery.KubernetesMetricsNamespace, + Name: "events_total", + Help: "The number of Kubernetes events handled.", + }, + []string{"role", "event"}, + ), + } + + d.metricRegisterer = discovery.NewMetricRegisterer(prometheus.NewRegistry(), []prometheus.Collector{d.eventCount}) + + return d, clientset } // makeDiscoveryWithMetadata creates a kubernetes.Discovery instance with the 
specified metadata config. diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index b188a3ceb1..74d87e22c4 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" @@ -35,12 +36,6 @@ const ( NodeLegacyHostIP = "LegacyHostIP" ) -var ( - nodeAddCount = eventCount.WithLabelValues("node", "add") - nodeUpdateCount = eventCount.WithLabelValues("node", "update") - nodeDeleteCount = eventCount.WithLabelValues("node", "delete") -) - // Node discovers Kubernetes nodes. type Node struct { logger log.Logger @@ -50,11 +45,22 @@ type Node struct { } // NewNode returns a new node discovery. -func NewNode(l log.Logger, inf cache.SharedInformer) *Node { +func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { l = log.NewNopLogger() } - n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")} + + nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) + nodeUpdateCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleUpdate) + nodeDeleteCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleDelete) + + n := &Node{ + logger: l, + informer: inf, + store: inf.GetStore(), + queue: workqueue.NewNamed(RoleNode.String()), + } + _, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { nodeAddCount.Inc() diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 88da7bba69..615717c138 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -23,6 +23,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,12 +35,6 @@ import ( const nodeIndex = "node" -var ( - podAddCount = eventCount.WithLabelValues("pod", "add") - podUpdateCount = eventCount.WithLabelValues("pod", "update") - podDeleteCount = eventCount.WithLabelValues("pod", "delete") -) - // Pod discovers new pod targets. type Pod struct { podInf cache.SharedIndexInformer @@ -51,18 +46,22 @@ type Pod struct { } // NewPod creates a new pod discovery. 
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer) *Pod { +func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { l = log.NewNopLogger() } + podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) + podDeleteCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleDelete) + podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate) + p := &Pod{ podInf: pods, nodeInf: nodes, withNodeMetadata: nodes != nil, store: pods.GetStore(), logger: l, - queue: workqueue.NewNamed("pod"), + queue: workqueue.NewNamed(RolePod.String()), } _, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 9fcc6644c3..51204a5a1a 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" @@ -30,12 +31,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - svcAddCount = eventCount.WithLabelValues("service", "add") - svcUpdateCount = eventCount.WithLabelValues("service", "update") - svcDeleteCount = eventCount.WithLabelValues("service", "delete") -) - // Service implements discovery of Kubernetes services. type Service struct { logger log.Logger @@ -45,11 +40,22 @@ type Service struct { } // NewService returns a new service discovery. -func NewService(l log.Logger, inf cache.SharedInformer) *Service { +func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { l = log.NewNopLogger() } - s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("service")} + + svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) + svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate) + svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete) + + s := &Service{ + logger: l, + informer: inf, + store: inf.GetStore(), + queue: workqueue.NewNamed(RoleService.String()), + } + _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { svcAddCount.Inc() diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go index 74c544e726..9c80f305a8 100644 --- a/discovery/legacymanager/manager.go +++ b/discovery/legacymanager/manager.go @@ -28,48 +28,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failedConfigs = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_failed_configs", - Help: "Current number of service discovery configurations that failed to load.", - }, - []string{"name"}, - ) - discoveredTargets = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_discovered_targets", - Help: "Current number of discovered targets.", - }, - []string{"name", "config"}, - ) - receivedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_received_updates_total", - Help: "Total number of update events received from the SD providers.", - }, - []string{"name"}, - ) - delayedUpdates = prometheus.NewCounterVec( - 
prometheus.CounterOpts{ - Name: "prometheus_sd_updates_delayed_total", - Help: "Total number of update events that couldn't be sent immediately.", - }, - []string{"name"}, - ) - sentUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_total", - Help: "Total number of update events sent to the SD consumers.", - }, - []string{"name"}, - ) -) - -func RegisterMetrics() { - prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) -} - type poolKey struct { setName string provider string @@ -84,7 +42,7 @@ type provider struct { } // NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager { if logger == nil { logger = log.NewNopLogger() } @@ -96,10 +54,21 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager ctx: ctx, updatert: 5 * time.Second, triggerSend: make(chan struct{}, 1), + registerer: registerer, } for _, option := range options { option(mgr) } + + // Register the metrics. + // We have to do this after setting all options, so that the name of the Manager is set. + if metrics, err := discovery.NewMetrics(registerer, mgr.name); err == nil { + mgr.metrics = metrics + } else { + level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + return nil + } + return mgr } @@ -135,6 +104,11 @@ type Manager struct { // The triggerSend channel signals to the manager that new updates have been received from providers. triggerSend chan struct{} + + // A registerer for all service discovery metrics. + registerer prometheus.Registerer + + metrics *discovery.Metrics } // Run starts the background processing. @@ -157,7 +131,7 @@ func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { for pk := range m.targets { if _, ok := cfg[pk.setName]; !ok { - discoveredTargets.DeleteLabelValues(m.name, pk.setName) + m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName) } } m.cancelDiscoverers() @@ -168,9 +142,9 @@ func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error { failedCount := 0 for name, scfg := range cfg { failedCount += m.registerProviders(scfg, name) - discoveredTargets.WithLabelValues(m.name, name).Set(0) + m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0) } - failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + m.metrics.FailedConfigs.Set(float64(failedCount)) for _, prov := range m.providers { m.startProvider(m.ctx, prov) @@ -207,7 +181,7 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ case <-ctx.Done(): return case tgs, ok := <-updates: - receivedUpdates.WithLabelValues(m.name).Inc() + m.metrics.ReceivedUpdates.Inc() if !ok { level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) return @@ -236,11 +210,11 @@ func (m *Manager) sender() { case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. 
select { case <-m.triggerSend: - sentUpdates.WithLabelValues(m.name).Inc() + m.metrics.SentUpdates.Inc() select { case m.syncCh <- m.allGroups(): default: - delayedUpdates.WithLabelValues(m.name).Inc() + m.metrics.DelayedUpdates.Inc() level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: @@ -288,7 +262,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { } } for setName, v := range n { - discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)) + m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } return tSets } @@ -309,7 +283,8 @@ func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int } typ := cfg.Name() d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), + Logger: log.With(m.logger, "discovery", typ, "config", setName), + Registerer: m.registerer, }) if err != nil { level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 13b84e6e36..7a2e8feea4 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -664,7 +665,8 @@ func TestTargetUpdatesOrder(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond var totalUpdatesCount int @@ -746,7 +748,8 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -774,7 +777,8 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { func TestDiscovererConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -798,7 +802,8 @@ func TestDiscovererConfigs(t *testing.T) { func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -837,7 +842,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { func TestIdenticalConfigurationsAreCoalesced(t *testing.T) 
{ ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, nil) + discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -868,7 +874,8 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -893,7 +900,8 @@ func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Disco func TestGaugeFailedConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -907,7 +915,7 @@ func TestGaugeFailedConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) <-discoveryManager.SyncCh() - failedCount := client_testutil.ToFloat64(failedConfigs) + failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) if failedCount != 3 { t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) } @@ -918,7 +926,7 @@ func TestGaugeFailedConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) <-discoveryManager.SyncCh() - failedCount = client_testutil.ToFloat64(failedConfigs) + failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) if failedCount != 0 { t.Fatalf("Expected to get no failed config, got: %v", failedCount) } @@ -1049,7 +1057,8 @@ func TestCoordinationWithReceiver(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - mgr := NewManager(ctx, nil) + mgr := NewManager(ctx, nil, prometheus.NewRegistry()) + require.NotNil(t, mgr) mgr.updatert = updateDelay go mgr.Run() diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index a5e047b948..38a5cdad4b 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -67,24 +67,15 @@ const ( ) // DefaultSDConfig is the default Linode SD configuration. -var ( - DefaultSDConfig = SDConfig{ - TagSeparator: ",", - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), - HTTPClientConfig: config.DefaultHTTPClientConfig, - } - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_linode_failures_total", - Help: "Number of Linode service discovery refresh failures.", - }) -) +var DefaultSDConfig = SDConfig{ + TagSeparator: ",", + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for Linode based service discovery. @@ -101,7 +92,7 @@ func (*SDConfig) Name() string { return "linode" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. 
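The same wiring recurs in every SD mechanism this patch touches: discovery.DiscovererOptions gains a Registerer field, and each SDConfig.NewDiscoverer forwards it to its constructor. A minimal sketch of the convention for a hypothetical config type (mySDConfig and newMyDiscovery are illustrative names, not part of this diff):

	func (c *mySDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
		// The registerer scopes this discoverer's metrics to the manager that owns it.
		return newMyDiscovery(c, opts.Logger, opts.Registerer)
	}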
@@ -131,16 +122,22 @@ type Discovery struct { pollCount int lastResults []*targetgroup.Group eventPollingEnabled bool + failuresCount prometheus.Counter } // NewDiscovery returns a new Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { d := &Discovery{ port: conf.Port, tagSeparator: conf.TagSeparator, pollCount: 0, lastRefreshTimestamp: time.Now().UTC(), eventPollingEnabled: true, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_linode_failures_total", + Help: "Number of Linode service discovery refresh failures.", + }), } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd") @@ -158,10 +155,14 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { d.client = &client d.Discovery = refresh.NewDiscovery( - logger, - "linode", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "linode", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + Metrics: []prometheus.Collector{d.failuresCount}, + }, ) return d, nil } @@ -222,14 +223,14 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro // Gather all linode instances. instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500}) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, err } // Gather detailed IP address info for all IPs on all linode instances. detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500}) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, err } diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index 988313b702..536b12090c 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -52,7 +53,7 @@ func TestLinodeSDRefresh(t *testing.T) { Credentials: tokenID, Type: "Bearer", } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) endpoint, err := url.Parse(sdmock.Mock.Endpoint()) require.NoError(t, err) @@ -61,12 +62,12 @@ func TestLinodeSDRefresh(t *testing.T) { tgs, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/manager.go b/discovery/manager.go index 86439d2c95..67e326c41a 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -28,48 +28,6 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failedConfigs = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_failed_configs", - Help: "Current number of service discovery configurations that failed to load.", - }, - []string{"name"}, - ) - discoveredTargets = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_sd_discovered_targets", - Help: "Current number of discovered targets.", - }, - 
[]string{"name", "config"}, - ) - receivedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_received_updates_total", - Help: "Total number of update events received from the SD providers.", - }, - []string{"name"}, - ) - delayedUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_delayed_total", - Help: "Total number of update events that couldn't be sent immediately.", - }, - []string{"name"}, - ) - sentUpdates = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_sd_updates_total", - Help: "Total number of update events sent to the SD consumers.", - }, - []string{"name"}, - ) -) - -func RegisterMetrics() { - prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) -} - type poolKey struct { setName string provider string @@ -107,7 +65,7 @@ func (p *Provider) Config() interface{} { } // NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager { if logger == nil { logger = log.NewNopLogger() } @@ -118,10 +76,21 @@ func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager ctx: ctx, updatert: 5 * time.Second, triggerSend: make(chan struct{}, 1), + registerer: registerer, } for _, option := range options { option(mgr) } + + // Register the metrics. + // We have to do this after setting all options, so that the name of the Manager is set. + if metrics, err := NewMetrics(registerer, mgr.name); err == nil { + mgr.metrics = metrics + } else { + level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + return nil + } + return mgr } @@ -170,6 +139,11 @@ type Manager struct { // lastProvider counts providers registered during Manager's lifetime. lastProvider uint + + // A registerer for all service discovery metrics. + registerer prometheus.Registerer + + metrics *Metrics } // Providers returns the currently configured SD providers. @@ -200,7 +174,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { for name, scfg := range cfg { failedCount += m.registerProviders(scfg, name) } - failedConfigs.WithLabelValues(m.name).Set(float64(failedCount)) + m.metrics.FailedConfigs.Set(float64(failedCount)) var ( wg sync.WaitGroup @@ -230,13 +204,13 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { delete(m.targets, poolKey{s, prov.name}) - discoveredTargets.DeleteLabelValues(m.name, s) + m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, s) } } // Set metrics and targets for new subs. for s := range prov.newSubs { if _, ok := prov.subs[s]; !ok { - discoveredTargets.WithLabelValues(m.name, s).Set(0) + m.metrics.DiscoveredTargets.WithLabelValues(s).Set(0) } if l := len(refTargets); l > 0 { m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) @@ -316,7 +290,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case <-ctx.Done(): return case tgs, ok := <-updates: - receivedUpdates.WithLabelValues(m.name).Inc() + m.metrics.ReceivedUpdates.Inc() if !ok { level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. 
@@ -349,11 +323,11 @@ func (m *Manager) sender() { case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker. select { case <-m.triggerSend: - sentUpdates.WithLabelValues(m.name).Inc() + m.metrics.SentUpdates.Inc() select { case m.syncCh <- m.allGroups(): default: - delayedUpdates.WithLabelValues(m.name).Inc() + m.metrics.DelayedUpdates.Inc() level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: @@ -405,7 +379,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { } } for setName, v := range n { - discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)) + m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } return tSets } @@ -428,6 +402,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { d, err := cfg.NewDiscoverer(DiscovererOptions{ Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, + Registerer: m.registerer, }) if err != nil { level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 5371608112..f22de75a46 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" client_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -664,7 +665,8 @@ func TestTargetUpdatesOrder(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond var totalUpdatesCount int @@ -778,7 +780,8 @@ func pk(provider, setName string, n int) poolKey { func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -790,27 +793,28 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", 
"{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -822,12 +826,12 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) c["prometheus2"] = c["prometheus"] delete(c, "prometheus") @@ -836,16 +840,17 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { syncedTargets = <-discoveryManager.SyncCh() p = pk("static", "prometheus2", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -860,30 +865,31 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi c["prometheus2"] = c["prometheus"] discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 2) delete(c, "prometheus") discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() p = pk("static", "prometheus2", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) 
+ require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -895,9 +901,9 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) var mu sync.Mutex c["prometheus2"] = Configs{ @@ -912,39 +918,40 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { // Original targets should be present as soon as possible. syncedTargets = <-discoveryManager.SyncCh() mu.Unlock() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) // prometheus2 configs should be ready on second sync. syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) p = pk("lockstatic", "prometheus2", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 2) // Delete part of config and ensure only original targets exist. 
delete(c, "prometheus2") discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -959,31 +966,32 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) - require.Equal(t, 2, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 2) c["prometheus"] = Configs{ staticConfig("foo:9090"), } discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) p = pk("static", "prometheus", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestDiscovererConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1001,12 +1009,12 @@ func TestDiscovererConfigs(t *testing.T) { verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) p = pk("static", "prometheus", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 2) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) verifySyncedPresence(t, 
syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true) - require.Equal(t, 3, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 3) } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after @@ -1015,7 +1023,8 @@ func TestDiscovererConfigs(t *testing.T) { func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1029,9 +1038,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { syncedTargets := <-discoveryManager.SyncCh() p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) c["prometheus"] = Configs{ StaticConfig{{}}, @@ -1052,8 +1061,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if len(group.Targets) != 0 { t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) } - require.Equal(t, 1, len(syncedTargets)) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets, 1) + require.Len(t, syncedTargets["prometheus"], 1) if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) } @@ -1062,7 +1071,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, nil) + discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1082,11 +1092,11 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { if len(discoveryManager.providers) != 1 { t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) } - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { @@ -1098,7 +1108,8 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1144,7 +1155,8 @@ func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup. 
func TestGaugeFailedConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() @@ -1158,7 +1170,7 @@ func TestGaugeFailedConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) <-discoveryManager.SyncCh() - failedCount := client_testutil.ToFloat64(failedConfigs) + failedCount := client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) if failedCount != 3 { t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount) } @@ -1169,7 +1181,7 @@ func TestGaugeFailedConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) <-discoveryManager.SyncCh() - failedCount = client_testutil.ToFloat64(failedConfigs) + failedCount = client_testutil.ToFloat64(discoveryManager.metrics.FailedConfigs) if failedCount != 0 { t.Fatalf("Expected to get no failed config, got: %v", failedCount) } @@ -1300,7 +1312,8 @@ func TestCoordinationWithReceiver(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - mgr := NewManager(ctx, nil) + mgr := NewManager(ctx, nil, prometheus.NewRegistry()) + require.NotNil(t, mgr) mgr.updatert = updateDelay go mgr.Run() @@ -1392,10 +1405,11 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when // ApplyConfig happens at the same time as targets update. -func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) { +func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - discoveryManager := NewManager(ctx, log.NewNopLogger()) + discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) + require.NotNil(t, discoveryManager) discoveryManager.updatert = 100 * time.Millisecond go discoveryManager.Run() diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 27947fa8a8..a6a6252fd0 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -28,6 +28,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -83,7 +84,7 @@ func (*SDConfig) Name() string { return "marathon" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(*c, opts.Logger) + return NewDiscovery(*c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -132,7 +133,7 @@ type Discovery struct { } // NewDiscovery returns a new Marathon Discovery. 
-func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd") if err != nil { return nil, err @@ -154,10 +155,13 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { appsClient: fetchApps, } d.Discovery = refresh.NewDiscovery( - logger, - "marathon", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "marathon", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go index 258e3c8ddf..a1ddce9309 100644 --- a/discovery/marathon/marathon_test.go +++ b/discovery/marathon/marathon_test.go @@ -21,6 +21,7 @@ import ( "net/http/httptest" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -36,7 +37,7 @@ func testConfig() SDConfig { } func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) { - md, err := NewDiscovery(testConfig(), nil) + md, err := NewDiscovery(testConfig(), nil, prometheus.NewRegistry()) if err != nil { return nil, err } @@ -129,7 +130,7 @@ func TestMarathonSDSendGroup(t *testing.T) { } func TestMarathonSDRemoveApp(t *testing.T) { - md, err := NewDiscovery(testConfig(), nil) + md, err := NewDiscovery(testConfig(), nil, prometheus.NewRegistry()) if err != nil { t.Fatalf("%s", err) } diff --git a/discovery/metrics.go b/discovery/metrics.go new file mode 100644 index 0000000000..6a60603955 --- /dev/null +++ b/discovery/metrics.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + clientGoRequestMetrics = &clientGoRequestMetricAdapter{} + clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{} +) + +func init() { + clientGoRequestMetrics.RegisterWithK8sGoClient() + clientGoWorkloadMetrics.RegisterWithK8sGoClient() +} + +// Metrics to be used with a discovery manager. 
+type Metrics struct { + FailedConfigs prometheus.Gauge + DiscoveredTargets *prometheus.GaugeVec + ReceivedUpdates prometheus.Counter + DelayedUpdates prometheus.Counter + SentUpdates prometheus.Counter +} + +func NewMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) { + m := &Metrics{} + + m.FailedConfigs = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "prometheus_sd_failed_configs", + Help: "Current number of service discovery configurations that failed to load.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.DiscoveredTargets = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_discovered_targets", + Help: "Current number of discovered targets.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + []string{"config"}, + ) + + m.ReceivedUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_received_updates_total", + Help: "Total number of update events received from the SD providers.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.DelayedUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_delayed_total", + Help: "Total number of update events that couldn't be sent immediately.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + m.SentUpdates = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_total", + Help: "Total number of update events sent to the SD consumers.", + ConstLabels: prometheus.Labels{"name": sdManagerName}, + }, + ) + + metrics := []prometheus.Collector{ + m.FailedConfigs, + m.DiscoveredTargets, + m.ReceivedUpdates, + m.DelayedUpdates, + m.SentUpdates, + } + + for _, collector := range metrics { + err := registerer.Register(collector) + if err != nil { + return nil, fmt.Errorf("failed to register discovery manager metrics: %w", err) + } + } + + return m, nil +} diff --git a/discovery/kubernetes/client_metrics.go b/discovery/metrics_k8s_client.go similarity index 81% rename from discovery/kubernetes/client_metrics.go rename to discovery/metrics_k8s_client.go index 7b097b14a3..f16245684b 100644 --- a/discovery/kubernetes/client_metrics.go +++ b/discovery/metrics_k8s_client.go @@ -11,10 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetes +package discovery import ( "context" + "fmt" "net/url" "time" @@ -23,13 +24,22 @@ import ( "k8s.io/client-go/util/workqueue" ) -const workqueueMetricsNamespace = metricsNamespace + "_workqueue" +// This file registers metrics used by the Kubernetes Go client (k8s.io/client-go). +// Unfortunately, k8s.io/client-go metrics are global. +// If we instantiate multiple k8s SD instances, their k8s/client-go metrics will overlap. +// To prevent us from displaying misleading metrics, we register k8s.io/client-go metrics +// outside of the Kubernetes SD. + +const ( + KubernetesMetricsNamespace = "prometheus_sd_kubernetes" + workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue" +) var ( // Metrics for client-go's HTTP requests. 
clientGoRequestResultMetricVec = prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: metricsNamespace, + Namespace: KubernetesMetricsNamespace, Name: "http_request_total", Help: "Total number of HTTP requests to the Kubernetes API by status code.", }, @@ -37,7 +47,7 @@ var ( ) clientGoRequestLatencyMetricVec = prometheus.NewSummaryVec( prometheus.SummaryOpts{ - Namespace: metricsNamespace, + Namespace: KubernetesMetricsNamespace, Name: "http_request_duration_seconds", Help: "Summary of latencies for HTTP requests to the Kubernetes API by endpoint.", Objectives: map[float64]float64{}, @@ -109,17 +119,38 @@ func (noopMetric) Set(float64) {} // Definition of client-go metrics adapters for HTTP requests observation. type clientGoRequestMetricAdapter struct{} -func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) { +// Returns all of the Prometheus metrics derived from k8s.io/client-go. +// This may be used to register and unregister the metrics. +func clientGoMetrics() []prometheus.Collector { + return []prometheus.Collector{ + clientGoRequestResultMetricVec, + clientGoRequestLatencyMetricVec, + clientGoWorkqueueDepthMetricVec, + clientGoWorkqueueAddsMetricVec, + clientGoWorkqueueLatencyMetricVec, + clientGoWorkqueueUnfinishedWorkSecondsMetricVec, + clientGoWorkqueueLongestRunningProcessorMetricVec, + clientGoWorkqueueWorkDurationMetricVec, + } +} + +func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error { + for _, collector := range clientGoMetrics() { + err := registerer.Register(collector) + if err != nil { + return fmt.Errorf("failed to register Kubernetes Go Client metrics: %w", err) + } + } + return nil +} + +func (f *clientGoRequestMetricAdapter) RegisterWithK8sGoClient() { metrics.Register( metrics.RegisterOpts{ RequestLatency: f, RequestResult: f, }, ) - registerer.MustRegister( - clientGoRequestResultMetricVec, - clientGoRequestLatencyMetricVec, - ) } func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) { @@ -133,16 +164,8 @@ func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.U // Definition of the client-go workqueue metrics provider. type clientGoWorkqueueMetricsProvider struct{} -func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) { +func (f *clientGoWorkqueueMetricsProvider) RegisterWithK8sGoClient() { workqueue.SetProvider(f) - registerer.MustRegister( - clientGoWorkqueueDepthMetricVec, - clientGoWorkqueueAddsMetricVec, - clientGoWorkqueueLatencyMetricVec, - clientGoWorkqueueWorkDurationMetricVec, - clientGoWorkqueueUnfinishedWorkSecondsMetricVec, - clientGoWorkqueueLongestRunningProcessorMetricVec, - ) } func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 162833ece4..a13bb8704a 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -26,6 +26,7 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -80,7 +81,7 @@ func (*DockerSDConfig) Name() string { return "docker" } // NewDiscoverer returns a Discoverer for the Config.
func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDockerDiscovery(c, opts.Logger) + return NewDockerDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -114,7 +115,7 @@ type DockerDiscovery struct { } // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets. -func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscovery, error) { +func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, reg prometheus.Registerer) (*DockerDiscovery, error) { var err error d := &DockerDiscovery{ @@ -165,10 +166,13 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscove } d.Discovery = refresh.NewDiscovery( - logger, - "docker", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "docker", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index bb84b1571a..1a87ad2a12 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -37,19 +38,19 @@ host: %s var cfg DockerSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDockerDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 3, len(tg.Targets)) + require.Len(t, tg.Targets, 3) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index 371f9d5ed1..bd87fea5a3 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -74,7 +75,7 @@ func (*DockerSwarmSDConfig) Name() string { return "dockerswarm" } // NewDiscoverer returns a Discoverer for the Config. func (c *DockerSwarmSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -117,7 +118,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { var err error d := &Discovery{ @@ -168,10 +169,13 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, err } d.Discovery = refresh.NewDiscovery( - logger, - "dockerswarm", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "dockerswarm", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go index 1a53321378..512ff7049d 100644 --- a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -38,19 +39,19 @@ host: %s var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 5, len(tg.Targets)) + require.Len(t, tg.Targets, 5) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 1bc9832c7a..816586dd7f 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -38,19 +39,19 @@ host: %s var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 15, len(tg.Targets)) + require.Len(t, tg.Targets, 15) for i, lbls := range []model.LabelSet{ { @@ -332,19 +333,19 @@ filters: var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg, "tg should not be nil") require.NotNil(t, tg.Targets, "tg.targets should not be nil") - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index 2cc9322f61..764fda3436 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" 
"github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -38,19 +39,19 @@ host: %s var cfg DockerSwarmSDConfig require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg)) - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 27, len(tg.Targets)) + require.Len(t, tg.Targets, 27) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index 7013f0737c..3fdcf714eb 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -49,27 +49,18 @@ const ( ) // DefaultSDConfig is the default nomad SD configuration. -var ( - DefaultSDConfig = SDConfig{ - AllowStale: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - Namespace: "default", - RefreshInterval: model.Duration(60 * time.Second), - Region: "global", - Server: "http://localhost:4646", - TagSeparator: ",", - } - - failuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_sd_nomad_failures_total", - Help: "Number of nomad service discovery refresh failures.", - }) -) +var DefaultSDConfig = SDConfig{ + AllowStale: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + Namespace: "default", + RefreshInterval: model.Duration(60 * time.Second), + Region: "global", + Server: "http://localhost:4646", + TagSeparator: ",", +} func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(failuresCount) } // SDConfig is the configuration for nomad based service discovery. @@ -88,7 +79,7 @@ func (*SDConfig) Name() string { return "nomad" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -121,10 +112,11 @@ type Discovery struct { region string server string tagSeparator string + failuresCount prometheus.Counter } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { d := &Discovery{ allowStale: conf.AllowStale, namespace: conf.Namespace, @@ -132,6 +124,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { region: conf.Region, server: conf.Server, tagSeparator: conf.TagSeparator, + failuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_nomad_failures_total", + Help: "Number of nomad service discovery refresh failures.", + }), } HTTPClient, err := config.NewClientFromConfig(conf.HTTPClientConfig, "nomad_sd") @@ -153,10 +150,14 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { d.client = client d.Discovery = refresh.NewDiscovery( - logger, - "nomad", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "nomad", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + Metrics: []prometheus.Collector{d.failuresCount}, + }, ) return d, nil } @@ -167,7 +168,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { } stubs, _, err := d.client.Services().List(opts) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, err } @@ -179,7 +180,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { for _, service := range stub.Services { instances, _, err := d.client.Services().Get(service.ServiceName, opts) if err != nil { - failuresCount.Inc() + d.failuresCount.Inc() return nil, fmt.Errorf("failed to fetch services: %w", err) } diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index d9aa54330d..ca67a877e1 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) @@ -127,7 +128,7 @@ func TestConfiguredService(t *testing.T) { conf := &SDConfig{ Server: "http://localhost:4646", } - _, err := NewDiscovery(conf, nil) + _, err := NewDiscovery(conf, nil, prometheus.NewRegistry()) require.NoError(t, err) } @@ -141,18 +142,18 @@ func TestNomadSDRefresh(t *testing.T) { cfg := DefaultSDConfig cfg.Server = endpoint.String() - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) tgs, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 1, len(tg.Targets)) + require.Len(t, tg.Targets, 1) lbls := model.LabelSet{ "__address__": model.LabelValue("127.0.0.1:30456"), diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 396d5283dc..45684b4a2e 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -53,12 +53,12 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) { hypervisor, _ := mock.openstackAuthSuccess() ctx := context.Background() tgs, err := hypervisor.refresh(ctx) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NoError(t, err) require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for l, v := range 
map[string]string{ "__address__": "172.16.70.14:0", diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index b2fe1e7870..9b28c1d6e1 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -145,16 +145,16 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - flavorId, ok := s.Flavor["id"].(string) + flavorID, ok := s.Flavor["id"].(string) if !ok { level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") continue } - labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId) + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) - imageId, ok := s.Image["id"].(string) + imageID, ok := s.Image["id"].(string) if ok { - labels[openstackLabelInstanceImage] = model.LabelValue(imageId) + labels[openstackLabelInstanceImage] = model.LabelValue(imageID) } for k, v := range s.Metadata { diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index d2da5d9681..9e124b6053 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -61,12 +61,12 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { tgs, err := instance.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index 92c83a4cf4..9544a7c0f7 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -24,6 +24,7 @@ import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/mwitkow/go-conntrack" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -70,7 +71,7 @@ func (*SDConfig) Name() string { return "openstack" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -134,16 +135,19 @@ type refresher interface { } // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. 
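The recurring test rewrites in this diff (`require.Equal(t, 1, len(tgs))` becoming `require.Len(t, tgs, 1)`, plus swapped expected/actual arguments) follow the `testifylint` rules enabled in `.golangci.yml`. A hypothetical test showing the preferred spellings:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// TestAssertionStyle is illustrative only; it shows the assertion forms
// testifylint steers toward.
func TestAssertionStyle(t *testing.T) {
	tgs := []*targetgroup.Group{{Source: "example"}}

	require.Len(t, tgs, 1)           // instead of require.Equal(t, 1, len(tgs))
	require.Empty(t, tgs[0].Targets) // instead of require.Equal(t, 0, len(tgs[0].Targets))

	// expected-actual: the expected value comes first.
	want := "example"
	require.Equal(t, want, tgs[0].Source)
}
```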
-func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, l log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) { r, err := newRefresher(conf, l) if err != nil { return nil, err } return refresh.NewDiscovery( - l, - "openstack", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: l, + Mech: "openstack", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + Registry: reg, + }, ), nil } diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index e8ffa4a283..52311bcc87 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -47,11 +47,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr targetGroups, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup) require.NotNil(t, targetGroup.Targets) - require.Equal(t, 1, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 1) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 535ade4df5..eca284a85a 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/log" "github.com/ovh/go-ovh/ovh" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -93,7 +94,7 @@ func createClient(config *SDConfig) (*ovh.Client, error) { // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, options.Logger) + return NewDiscovery(c, options.Logger, options.Registerer) } func init() { @@ -140,16 +141,19 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { } // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. 
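Each SD package migrates its constructor the same way: the positional `refresh.NewDiscovery(logger, mech, interval, refreshF)` call becomes an options struct. A sketch of the new call shape for a hypothetical mechanism (the field names are taken from the `refresh.Options` definition later in this diff):

```go
package example

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

func newRefreshDiscovery(logger log.Logger, reg prometheus.Registerer, interval time.Duration,
	refreshF func(ctx context.Context) ([]*targetgroup.Group, error),
) *refresh.Discovery {
	// Before: refresh.NewDiscovery(logger, "example", interval, refreshF)
	return refresh.NewDiscovery(refresh.Options{
		Logger:   logger,
		Mech:     "example",
		Interval: interval,
		RefreshF: refreshF,
		Registry: reg, // the discovery's metrics are registered against this
	})
}
```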
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) { r, err := newRefresher(conf, logger) if err != nil { return nil, err } return refresh.NewDiscovery( - logger, - "ovhcloud", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: logger, + Mech: "ovhcloud", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + Registry: reg, + }, ), nil } diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index efcd95bb0d..9bd9ea9547 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -18,6 +18,7 @@ import ( "fmt" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" @@ -122,7 +123,8 @@ func TestDiscoverer(t *testing.T) { conf, _ := getMockConf("vps") logger := testutil.NewLogger(t) _, err := conf.NewDiscoverer(discovery.DiscovererOptions{ - Logger: logger, + Logger: logger, + Registerer: prometheus.NewRegistry(), }) require.NoError(t, err) diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index b1177f215e..2d2d6dcd21 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -49,11 +49,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr targetGroups, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup) require.NotNil(t, targetGroup.Targets) - require.Equal(t, 1, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 1) for i, lbls := range []model.LabelSet{ { "__address__": "192.0.2.1", diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 9484a0aa63..616f2c61e6 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/regexp" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -83,7 +84,7 @@ func (*SDConfig) Name() string { return "puppetdb" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -130,7 +131,7 @@ type Discovery struct { } // NewDiscovery returns a new PuppetDB discovery for the given config. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { if logger == nil { logger = log.NewNopLogger() } @@ -156,10 +157,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "http", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "http", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go index 5514787d46..edd9b9d046 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -62,7 +63,7 @@ func TestPuppetSlashInURL(t *testing.T) { Port: 80, RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) require.Equal(t, apiURL, d.url) } @@ -79,7 +80,7 @@ func TestPuppetDBRefresh(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() @@ -105,7 +106,7 @@ func TestPuppetDBRefresh(t *testing.T) { Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) } func TestPuppetDBRefreshWithParameters(t *testing.T) { @@ -120,7 +121,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() @@ -156,7 +157,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) } func TestPuppetDBInvalidCode(t *testing.T) { @@ -172,7 +173,7 @@ func TestPuppetDBInvalidCode(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() @@ -193,7 +194,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) { RefreshInterval: model.Duration(30 * time.Second), } - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) ctx := context.Background() diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go index 919567a53b..0b0e5a921d 100644 --- a/discovery/refresh/refresh.go +++ b/discovery/refresh/refresh.go @@ -22,29 +22,17 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) -var ( - failuresCount = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: 
"prometheus_sd_refresh_failures_total", - Help: "Number of refresh failures for the given SD mechanism.", - }, - []string{"mechanism"}, - ) - duration = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "prometheus_sd_refresh_duration_seconds", - Help: "The duration of a refresh in seconds for the given SD mechanism.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - []string{"mechanism"}, - ) -) - -func init() { - prometheus.MustRegister(duration, failuresCount) +type Options struct { + Logger log.Logger + Mech string + Interval time.Duration + RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) + Registry prometheus.Registerer + Metrics []prometheus.Collector } // Discovery implements the Discoverer interface. @@ -54,25 +42,62 @@ type Discovery struct { refreshf func(ctx context.Context) ([]*targetgroup.Group, error) failures prometheus.Counter - duration prometheus.Observer + duration prometheus.Summary + + metricRegisterer discovery.MetricRegisterer } // NewDiscovery returns a Discoverer function that calls a refresh() function at every interval. -func NewDiscovery(l log.Logger, mech string, interval time.Duration, refreshf func(ctx context.Context) ([]*targetgroup.Group, error)) *Discovery { - if l == nil { - l = log.NewNopLogger() +func NewDiscovery(opts Options) *Discovery { + var logger log.Logger + if opts.Logger == nil { + logger = log.NewNopLogger() + } else { + logger = opts.Logger } - return &Discovery{ - logger: l, - interval: interval, - refreshf: refreshf, - failures: failuresCount.WithLabelValues(mech), - duration: duration.WithLabelValues(mech), + + d := Discovery{ + logger: logger, + interval: opts.Interval, + refreshf: opts.RefreshF, + failures: prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_refresh_failures_total", + Help: "Number of refresh failures for the given SD mechanism.", + ConstLabels: prometheus.Labels{ + "mechanism": opts.Mech, + }, + }), + duration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "prometheus_sd_refresh_duration_seconds", + Help: "The duration of a refresh in seconds for the given SD mechanism.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + ConstLabels: prometheus.Labels{ + "mechanism": opts.Mech, + }, + }), } + + metrics := []prometheus.Collector{d.failures, d.duration} + if opts.Metrics != nil { + metrics = append(metrics, opts.Metrics...) + } + + d.metricRegisterer = discovery.NewMetricRegisterer(opts.Registry, metrics) + + return &d } // Run implements the Discoverer interface. func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() + // Get an initial set right away. 
tgs, err := d.refresh(ctx) if err != nil { diff --git a/discovery/refresh/refresh_test.go b/discovery/refresh/refresh_test.go index 6decef19fc..12e7ab3be0 100644 --- a/discovery/refresh/refresh_test.go +++ b/discovery/refresh/refresh_test.go @@ -19,6 +19,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/goleak" @@ -65,7 +66,15 @@ func TestRefresh(t *testing.T) { return nil, fmt.Errorf("some error") } interval := time.Millisecond - d := NewDiscovery(nil, "test", interval, refresh) + d := NewDiscovery( + Options{ + Logger: nil, + Mech: "test", + Interval: interval, + RefreshF: refresh, + Registry: prometheus.NewRegistry(), + }, + ) ch := make(chan []*targetgroup.Group) ctx, cancel := context.WithCancel(context.Background()) diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index e7a32dd924..d2449d00c9 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -55,12 +55,12 @@ api_url: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for i, lbls := range []model.LabelSet{ { @@ -161,5 +161,5 @@ api_url: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) } diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index 90091b3172..86527b34e6 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/scaleway/scaleway-sdk-go/scw" @@ -160,7 +161,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(&c, options.Logger) + return NewDiscovery(&c, options.Logger, options.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -177,17 +178,20 @@ func init() { // the Discoverer interface. type Discovery struct{} -func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) { r, err := newRefresher(conf) if err != nil { return nil, err } return refresh.NewDiscovery( - logger, - "scaleway", - time.Duration(conf.RefreshInterval), - r.refresh, + refresh.Options{ + Logger: logger, + Mech: "scaleway", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: r.refresh, + Registry: reg, + }, ), nil } diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index c83f3b34ab..4839827ada 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -26,6 +26,7 @@ import ( "github.com/go-kit/log" "github.com/mwitkow/go-conntrack" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -74,7 +75,7 @@ func (*SDConfig) Name() string { return "triton" } // NewDiscoverer returns a Discoverer for the Config. 
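The tests throughout this diff now pass a fresh `prometheus.NewRegistry()` to each constructor. One reason, sketched below: registering two collectors with an identical descriptor on the same registerer fails with `AlreadyRegisteredError`, so per-test registries keep discovery instances isolated (an illustrative program, not code from the tree):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func newFailures() prometheus.Counter {
	return prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_sd_refresh_failures_total",
		Help: "Number of refresh failures for the given SD mechanism.",
	})
}

func main() {
	reg := prometheus.NewRegistry()

	fmt.Println(reg.Register(newFailures())) // <nil>

	// A second collector with an identical descriptor is rejected.
	err := reg.Register(newFailures())
	var are prometheus.AlreadyRegisteredError
	fmt.Println(errors.As(err, &are)) // true
}
```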
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return New(opts.Logger, c) + return New(opts.Logger, c, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -138,7 +139,7 @@ type Discovery struct { } // New returns a new Discovery which periodically refreshes its targets. -func New(logger log.Logger, conf *SDConfig) (*Discovery, error) { +func New(logger log.Logger, conf *SDConfig, reg prometheus.Registerer) (*Discovery, error) { tls, err := config.NewTLSConfig(&conf.TLSConfig) if err != nil { return nil, err @@ -159,10 +160,13 @@ func New(logger log.Logger, conf *SDConfig) (*Discovery, error) { sdConfig: conf, } d.Discovery = refresh.NewDiscovery( - logger, - "triton", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "triton", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index ca38965322..fa51a2e472 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -79,7 +80,7 @@ var ( ) func newTritonDiscovery(c SDConfig) (*Discovery, error) { - return New(nil, &c) + return New(nil, &c, prometheus.NewRegistry()) } func TestTritonSDNew(t *testing.T) { @@ -155,7 +156,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { tgts := testTritonSDRefresh(t, conf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func TestTritonSDRefreshNoServer(t *testing.T) { @@ -163,7 +164,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { _, err := td.refresh(context.Background()) require.Error(t, err) - require.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true) + require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) } func TestTritonSDRefreshCancelled(t *testing.T) { @@ -173,7 +174,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { cancel() _, err := td.refresh(ctx) require.Error(t, err) - require.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true) + require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) } func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { @@ -188,7 +189,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func TestTritonSDRefreshCNsWithHostname(t *testing.T) { @@ -205,7 +206,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet { @@ -235,7 +236,7 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet tgs, err := td.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) diff --git a/discovery/util.go b/discovery/util.go new file mode 100644 index 0000000000..83cc640dd9 --- /dev/null +++ b/discovery/util.go @@ -0,0 
+1,72 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +// A utility to be used by implementations of discovery.Discoverer +// which need to manage the lifetime of their metrics. +type MetricRegisterer interface { + RegisterMetrics() error + UnregisterMetrics() +} + +// metricRegistererImpl is an implementation of MetricRegisterer. +type metricRegistererImpl struct { + reg prometheus.Registerer + metrics []prometheus.Collector +} + +var _ MetricRegisterer = &metricRegistererImpl{} + +// Creates an instance of a MetricRegisterer. +// Typically called inside the implementation of the NewDiscoverer() method. +func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { + return &metricRegistererImpl{ + reg: reg, + metrics: metrics, + } +} + +// RegisterMetrics registers the metrics with a Prometheus registerer. +// If any metric fails to register, it will unregister all metrics that +// were registered so far, and return an error. +// Typically called at the start of the SD's Run() method. +func (rh *metricRegistererImpl) RegisterMetrics() error { + for _, collector := range rh.metrics { + err := rh.reg.Register(collector) + if err != nil { + // Unregister all metrics that were registered so far. + // This is so that if RegisterMetrics() gets called again, + // there will not be an error due to a duplicate registration. + rh.UnregisterMetrics() + + return fmt.Errorf("failed to register metric: %w", err) + } + } + return nil +} + +// UnregisterMetrics unregisters the metrics from the same Prometheus +// registerer which was used to register them. +// Typically called at the end of the SD's Run() method by a defer statement. +func (rh *metricRegistererImpl) UnregisterMetrics() { + for _, collector := range rh.metrics { + rh.reg.Unregister(collector) + } +} diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index bc33d28cba..744f3f96cf 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -25,6 +25,7 @@ import ( "github.com/go-kit/log" "github.com/kolo/xmlrpc" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -115,7 +116,7 @@ func (*SDConfig) Name() string { return "uyuni" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -203,7 +204,7 @@ func getEndpointInfoForSystems( } // NewDiscovery returns a uyuni discovery for the given configuration. 
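For orientation, the intended call pattern for the new `discovery.MetricRegisterer` inside a Discoverer's `Run` method, mirroring `refresh.Discovery.Run` above (the surrounding type here is hypothetical):

```go
package example

import (
	"context"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// exampleDiscovery is illustrative; only the MetricRegisterer calls matter.
type exampleDiscovery struct {
	logger           log.Logger
	metricRegisterer discovery.MetricRegisterer
}

func (d *exampleDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	// Register at the start of Run and bail out if the registerer refuses.
	if err := d.metricRegisterer.RegisterMetrics(); err != nil {
		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
		return
	}
	// Unregister on exit so a later Run against the same registerer succeeds.
	defer d.metricRegisterer.UnregisterMetrics()

	<-ctx.Done() // stand-in for the real refresh loop
}
```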
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { apiURL, err := url.Parse(conf.Server) if err != nil { return nil, err @@ -227,10 +228,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "uyuni", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "uyuni", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/uyuni/uyuni_test.go b/discovery/uyuni/uyuni_test.go index d045cde6d7..fd03c88f1b 100644 --- a/discovery/uyuni/uyuni_test.go +++ b/discovery/uyuni/uyuni_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/require" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -35,7 +37,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err Server: ts.URL, } - md, err := NewDiscovery(&conf, nil) + md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry()) if err != nil { return nil, err } @@ -55,7 +57,7 @@ func TestUyuniSDHandleError(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestUyuniSDLogin(t *testing.T) { @@ -87,7 +89,7 @@ func TestUyuniSDLogin(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestUyuniSDSkipLogin(t *testing.T) { @@ -108,7 +110,7 @@ func TestUyuniSDSkipLogin(t *testing.T) { Server: ts.URL, } - md, err := NewDiscovery(&conf, nil) + md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry()) if err != nil { t.Error(err) } @@ -119,5 +121,5 @@ func TestUyuniSDSkipLogin(t *testing.T) { tgs, err := md.refresh(context.Background()) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 42881d3c19..129800048a 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -23,6 +23,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -78,7 +79,7 @@ func (*SDConfig) Name() string { return "vultr" } // NewDiscoverer returns a Discoverer for the Config. func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger) + return NewDiscovery(c, opts.Logger, opts.Registerer) } // SetDirectory joins any relative file paths with dir. @@ -106,7 +107,7 @@ type Discovery struct { } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { d := &Discovery{ port: conf.Port, } @@ -128,10 +129,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { } d.Discovery = refresh.NewDiscovery( - logger, - "vultr", - time.Duration(conf.RefreshInterval), - d.refresh, + refresh.Options{ + Logger: logger, + Mech: "vultr", + Interval: time.Duration(conf.RefreshInterval), + RefreshF: d.refresh, + Registry: reg, + }, ) return d, nil } diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go index b729541531..c50b11d2da 100644 --- a/discovery/vultr/vultr_test.go +++ b/discovery/vultr/vultr_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" ) @@ -46,7 +47,7 @@ func TestVultrSDRefresh(t *testing.T) { cfg := DefaultSDConfig cfg.HTTPClientConfig.BearerToken = APIKey - d, err := NewDiscovery(&cfg, log.NewNopLogger()) + d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry()) require.NoError(t, err) endpoint, err := url.Parse(sdMock.Mock.Endpoint()) require.NoError(t, err) @@ -56,12 +57,12 @@ func TestVultrSDRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 3, len(tg.Targets)) + require.Len(t, tg.Targets, 3) for i, k := range []model.LabelSet{ { diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index ff5217359c..b699995fb7 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -53,14 +53,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { require.Empty(t, endpointURL) require.Error(t, err) - require.Equal(t, err.Error(), "invalid xDS server URL") + require.Equal(t, "invalid xDS server URL", err.Error()) } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.NotNil(t, err) + require.Error(t, err) require.Contains(t, err.Error(), "must be either 'http' or 'https'") } @@ -68,7 +68,7 @@ func TestMakeXDSResourceHttpEndpoint(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("http://127.0.0.1:5000"), "monitoring") require.NoError(t, err) - require.Equal(t, endpointURL.String(), "http://127.0.0.1:5000/v3/discovery:monitoring") + require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring", endpointURL.String()) } func TestCreateNewHTTPResourceClient(t *testing.T) { @@ -89,8 +89,8 @@ func TestCreateNewHTTPResourceClient(t *testing.T) { require.NoError(t, err) - require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") - require.Equal(t, client.client.Timeout, 1*time.Minute) + require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1", client.endpoint) + require.Equal(t, 1*time.Minute, client.client.Timeout) } func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { @@ -138,7 +138,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) { require.NotNil(t, res) 
require.Equal(t, client.ResourceTypeURL(), res.TypeUrl) - require.Len(t, res.Resources, 0) + require.Empty(t, res.Resources) require.Equal(t, "abc", client.latestNonce, "Nonce not cached") require.Equal(t, "1", client.latestVersion, "Version not cached") diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index bc88ba5540..c74bc552c7 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -30,35 +30,12 @@ import ( "github.com/prometheus/prometheus/util/strutil" ) -var ( - // DefaultKumaSDConfig is the default Kuma MADS SD configuration. - DefaultKumaSDConfig = KumaSDConfig{ - HTTPClientConfig: config.DefaultHTTPClientConfig, - RefreshInterval: model.Duration(15 * time.Second), - FetchTimeout: model.Duration(2 * time.Minute), - } - - kumaFetchFailuresCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_kuma_fetch_failures_total", - Help: "The number of Kuma MADS fetch call failures.", - }) - kumaFetchSkipUpdateCount = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "sd_kuma_fetch_skipped_updates_total", - Help: "The number of Kuma MADS fetch calls that result in no updates to the targets.", - }) - kumaFetchDuration = prometheus.NewSummary( - prometheus.SummaryOpts{ - Namespace: namespace, - Name: "sd_kuma_fetch_duration_seconds", - Help: "The duration of a Kuma MADS fetch call.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - ) -) +// DefaultKumaSDConfig is the default Kuma MADS SD configuration. +var DefaultKumaSDConfig = KumaSDConfig{ + HTTPClientConfig: config.DefaultHTTPClientConfig, + RefreshInterval: model.Duration(15 * time.Second), + FetchTimeout: model.Duration(2 * time.Minute), +} const ( // kumaMetaLabelPrefix is the meta prefix used for all kuma meta labels. @@ -120,7 +97,7 @@ func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discover logger = log.NewNopLogger() } - return NewKumaHTTPDiscovery(c, logger) + return NewKumaHTTPDiscovery(c, logger, opts.Registerer) } func convertKumaV1MonitoringAssignment(assignment *MonitoringAssignment) []model.LabelSet { @@ -176,12 +153,16 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L return targets, nil } -func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Discoverer, error) { +func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, reg prometheus.Registerer) (discovery.Discoverer, error) { // Default to "prometheus" if hostname is unavailable. 
- clientID, err := osutil.GetFQDN() - if err != nil { - level.Debug(logger).Log("msg", "error getting FQDN", "err", err) - clientID = "prometheus" + clientID := conf.ClientID + if clientID == "" { + var err error + clientID, err = osutil.GetFQDN() + if err != nil { + level.Debug(logger).Log("msg", "error getting FQDN", "err", err) + clientID = "prometheus" + } } clientConfig := &HTTPResourceClientConfig{ @@ -203,15 +184,41 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger) (discovery.Disc } d := &fetchDiscovery{ - client: client, - logger: logger, - refreshInterval: time.Duration(conf.RefreshInterval), - source: "kuma", - parseResources: kumaMadsV1ResourceParser, - fetchFailuresCount: kumaFetchFailuresCount, - fetchSkipUpdateCount: kumaFetchSkipUpdateCount, - fetchDuration: kumaFetchDuration, + client: client, + logger: logger, + refreshInterval: time.Duration(conf.RefreshInterval), + source: "kuma", + parseResources: kumaMadsV1ResourceParser, + fetchFailuresCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_kuma_fetch_failures_total", + Help: "The number of Kuma MADS fetch call failures.", + }), + fetchSkipUpdateCount: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sd_kuma_fetch_skipped_updates_total", + Help: "The number of Kuma MADS fetch calls that result in no updates to the targets.", + }), + fetchDuration: prometheus.NewSummary( + prometheus.SummaryOpts{ + Namespace: namespace, + Name: "sd_kuma_fetch_duration_seconds", + Help: "The duration of a Kuma MADS fetch call.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + ), } + d.metricRegisterer = discovery.NewMetricRegisterer( + reg, + []prometheus.Collector{ + d.fetchFailuresCount, + d.fetchSkipUpdateCount, + d.fetchDuration, + }, + ) + return d, nil } diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 1db0a0831d..5e2417c96f 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -21,6 +21,7 @@ import ( "time" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -107,7 +108,7 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis } func newKumaTestHTTPDiscovery(c KumaSDConfig) (*fetchDiscovery, error) { - kd, err := NewKumaHTTPDiscovery(&c, nopLogger) + kd, err := NewKumaHTTPDiscovery(&c, nopLogger, prometheus.NewRegistry()) if err != nil { return nil, err } @@ -129,7 +130,7 @@ func TestKumaMadsV1ResourceParserInvalidTypeURL(t *testing.T) { func TestKumaMadsV1ResourceParserEmptySlice(t *testing.T) { resources := make([]*anypb.Any, 0) groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) - require.Len(t, groups, 0) + require.Empty(t, groups) require.NoError(t, err) } @@ -204,7 +205,7 @@ func TestNewKumaHTTPDiscovery(t *testing.T) { require.True(t, ok) require.Equal(t, kumaConf.Server, resClient.Server()) require.Equal(t, KumaMadsV1ResourceTypeURL, resClient.ResourceTypeURL()) - require.NotEmpty(t, resClient.ID()) + require.Equal(t, kumaConf.ClientID, resClient.ID()) require.Equal(t, KumaMadsV1ResourceType, resClient.config.ResourceType) } diff --git a/discovery/xds/xds.go b/discovery/xds/xds.go index 48bdbab02b..8b6cb7e65e 100644 --- a/discovery/xds/xds.go +++ b/discovery/xds/xds.go @@ -55,6 +55,7 @@ type SDConfig struct { RefreshInterval 
model.Duration `yaml:"refresh_interval,omitempty"` FetchTimeout model.Duration `yaml:"fetch_timeout,omitempty"` Server string `yaml:"server,omitempty"` + ClientID string `yaml:"client_id,omitempty"` } // mustRegisterMessage registers the provided message type in the typeRegistry, and panics @@ -69,9 +70,6 @@ func init() { // Register top-level SD Configs. discovery.RegisterConfig(&KumaSDConfig{}) - // Register metrics. - prometheus.MustRegister(kumaFetchDuration, kumaFetchSkipUpdateCount, kumaFetchFailuresCount) - // Register protobuf types that need to be marshalled/ unmarshalled. mustRegisterMessage(protoTypes, (&v3.DiscoveryRequest{}).ProtoReflect().Type()) mustRegisterMessage(protoTypes, (&v3.DiscoveryResponse{}).ProtoReflect().Type()) @@ -109,12 +107,20 @@ type fetchDiscovery struct { parseResources resourceParser logger log.Logger - fetchDuration prometheus.Observer + fetchDuration prometheus.Summary fetchSkipUpdateCount prometheus.Counter fetchFailuresCount prometheus.Counter + + metricRegisterer discovery.MetricRegisterer } func (d *fetchDiscovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + err := d.metricRegisterer.RegisterMetrics() + if err != nil { + level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error()) + return + } + defer d.metricRegisterer.UnregisterMetrics() defer d.client.Close() ticker := time.NewTicker(d.refreshInterval) diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index 974a47342f..f57fff9968 100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -36,6 +36,7 @@ var ( sdConf = SDConfig{ Server: "http://127.0.0.1", RefreshInterval: model.Duration(10 * time.Second), + ClientID: "test-id", } testFetchFailuresCount = prometheus.NewCounter( diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index a960350ec0..9b0fc7cc60 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -429,6 +429,15 @@ Unit tests for rules. +###### Flags + +| Flag | Description | +| --- | --- | +| --run | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | + + + + ###### Arguments | Argument | Description | Required | diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index c129c7e66c..5e2f31c1c6 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1126,7 +1126,7 @@ A DNS-based service discovery configuration allows specifying a set of DNS domain names which are periodically queried to discover a list of targets. The DNS servers to be contacted are read from `/etc/resolv.conf`. -This service discovery method only supports basic DNS A, AAAA, MX and SRV +This service discovery method only supports basic DNS A, AAAA, MX, NS and SRV record queries, but not the advanced DNS-SD approach specified in [RFC6763](https://tools.ietf.org/html/rfc6763). @@ -1136,13 +1136,14 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_dns_srv_record_target`: the target field of the SRV record * `__meta_dns_srv_record_port`: the port field of the SRV record * `__meta_dns_mx_record_target`: the target field of the MX record +* `__meta_dns_ns_record_target`: the target field of the NS record ```yaml # A list of DNS domain names to be queried. names: [ - <string> ] -# The type of DNS query to perform. One of SRV, A, AAAA or MX. +# The type of DNS query to perform. One of SRV, A, AAAA, MX or NS.
[ type: <string> | default = 'SRV' ] # The port number used if the query type is not SRV. @@ -2229,6 +2230,11 @@ See below for the configuration options for Kuma MonitoringAssignment discovery: # Address of the Kuma Control Plane's MADS xDS server. server: <string> +# The client ID is used by the Kuma Control Plane to compute the Monitoring Assignment for a specific Prometheus backend. +# This is useful when migrating between multiple Prometheus backends, or when running a separate backend for each Mesh. +# When not specified, the system hostname/FQDN will be used if available; otherwise, `prometheus` will be used. +[ client_id: <string> ] + # The time to wait between polling update requests. [ refresh_interval: <duration> | default = 30s ] diff --git a/docs/feature_flags.md b/docs/feature_flags.md index d57763af0b..bcf8309b5c 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -119,20 +119,19 @@ also experimental) protobuf parser, through which _all_ metrics are ingested (i.e. not only native histograms). Prometheus will try to negotiate the protobuf format first. The instrumented target needs to support the protobuf format, too, _and_ it needs to expose native histograms. The protobuf format -allows to expose conventional and native histograms side by side. With this -feature flag disabled, Prometheus will continue to parse the conventional -histogram (albeit via the text format). With this flag enabled, Prometheus will -still ingest those conventional histograms that do not come with a -corresponding native histogram. However, if a native histogram is present, -Prometheus will ignore the corresponding conventional histogram, with the -notable exception of exemplars, which are always ingested. To keep the -conventional histograms as well, enable `scrape_classic_histograms` in the -scrape job. +allows exposing classic and native histograms side by side. With this feature +flag disabled, Prometheus will continue to parse the classic histogram (albeit +via the text format). With this flag enabled, Prometheus will still ingest +those classic histograms that do not come with a corresponding native +histogram. However, if a native histogram is present, Prometheus will ignore +the corresponding classic histogram, with the notable exception of exemplars, +which are always ingested. To keep the classic histograms as well, enable +`scrape_classic_histograms` in the scrape job. _Note about the format of `le` and `quantile` label values:_ In certain situations, the protobuf parsing changes the number formatting of -the `le` labels of conventional histograms and the `quantile` labels of +the `le` labels of classic histograms and the `quantile` labels of summaries. Typically, this happens if the scraped target is instrumented with [client_golang](https://github.com/prometheus/client_golang) provided that [promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts) @@ -195,3 +194,13 @@ won't work when you push OTLP metrics. Enables PromQL functions that are considered experimental and whose name or semantics could change. + +## Created Timestamps Zero Ingestion + +`--enable-feature=created-timestamp-zero-ingestion` + +Enables ingestion of created timestamps. Created timestamps are injected as 0 valued samples when appropriate. See [PromCon talk](https://youtu.be/nWf0BfQ5EEA) for details. + +Currently, Prometheus supports created timestamps only on the traditional Prometheus protobuf protocol (WIP for other protocols).
As a result, when enabling this feature, the Prometheus protobuf scrape protocol will be prioritized (See `scrape_config.scrape_protocols` settings for more details). + +Besides enabling this feature in Prometheus, created timestamps need to be exposed by the application being scraped. diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 00afa1d223..dda88fccd1 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -238,23 +238,23 @@ boundaries are inclusive or exclusive. ## `histogram_quantile()` `histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤ -φ ≤ 1) from a [conventional +φ ≤ 1) from a [classic histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) or from a native histogram. (See [histograms and summaries](https://prometheus.io/docs/practices/histograms) for a detailed -explanation of φ-quantiles and the usage of the (conventional) histogram metric +explanation of φ-quantiles and the usage of the (classic) histogram metric type in general.) _Note that native histograms are an experimental feature. The behavior of this function when dealing with native histograms may change in future versions of Prometheus._ -The conventional float samples in `b` are considered the counts of observations -in each bucket of one or more conventional histograms. Each float sample must -have a label `le` where the label value denotes the inclusive upper bound of -the bucket. (Float samples without such a label are silently ignored.) The -other labels and the metric name are used to identify the buckets belonging to -each conventional histogram. The [histogram metric +The float samples in `b` are considered the counts of observations in each +bucket of one or more classic histograms. Each float sample must have a label +`le` where the label value denotes the inclusive upper bound of the bucket. +(Float samples without such a label are silently ignored.) The other labels and +the metric name are used to identify the buckets belonging to each classic +histogram. The [histogram metric type](https://prometheus.io/docs/concepts/metric_types/#histogram) automatically provides time series with the `_bucket` suffix and the appropriate labels. @@ -262,17 +262,17 @@ appropriate labels. The native histogram samples in `b` are treated each individually as a separate histogram to calculate the quantile from. -As long as no naming collisions arise, `b` may contain a mix of conventional +As long as no naming collisions arise, `b` may contain a mix of classic and native histograms. Use the `rate()` function to specify the time window for the quantile calculation. Example: A histogram metric is called `http_request_duration_seconds` (and -therefore the metric name for the buckets of a conventional histogram is +therefore the metric name for the buckets of a classic histogram is `http_request_duration_seconds_bucket`). To calculate the 90th percentile of request durations over the last 10m, use the following expression in case -`http_request_duration_seconds` is a conventional histogram: +`http_request_duration_seconds` is a classic histogram: histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m])) @@ -283,9 +283,9 @@ For a native histogram, use the following expression instead: The quantile is calculated for each label combination in `http_request_duration_seconds`. To aggregate, use the `sum()` aggregator around the `rate()` function. 
Since the `le` label is required by -`histogram_quantile()` to deal with conventional histograms, it has to be +`histogram_quantile()` to deal with classic histograms, it has to be included in the `by` clause. The following expression aggregates the 90th -percentile by `job` for conventional histograms: +percentile by `job` for classic histograms: histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m]))) @@ -293,7 +293,7 @@ When aggregating native histograms, the expression simplifies to: histogram_quantile(0.9, sum by (job) (rate(http_request_duration_seconds[10m]))) -To aggregate all conventional histograms, specify only the `le` label: +To aggregate all classic histograms, specify only the `le` label: histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[10m]))) @@ -307,7 +307,7 @@ assuming a linear distribution within a bucket. If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned. -The following is only relevant for conventional histograms: If `b` contains +The following is only relevant for classic histograms: If `b` contains fewer than two buckets, `NaN` is returned. The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located in the highest bucket, the upper bound of the second highest bucket is @@ -586,6 +586,22 @@ in ascending order. Native histograms are sorted by their sum of observations. Same as `sort`, but sorts in descending order. +## `sort_by_label()` + +**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** + +`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order. + +Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. + +## `sort_by_label_desc()` + +**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** + +Same as `sort_by_label`, but sorts in descending order. + +Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. + ## `sqrt()` `sqrt(v instant-vector)` calculates the square root of all elements in `v`. @@ -624,6 +640,7 @@ over time and return an instant vector with per-series aggregation results: * `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. * `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. * `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. +* `mad_over_time(range-vector)`: the median absolute deviation of all points in the specified interval. * `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. diff --git a/docs/storage.md b/docs/storage.md index bcb8f7853e..b4c5b6adad 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -17,9 +17,9 @@ Ingested samples are grouped into blocks of two hours. 
Each two-hour block consi of a directory containing a chunks subdirectory containing all the time series samples for that window of time, a metadata file, and an index file (which indexes metric names and labels to time series in the chunks directory). The samples in the chunks directory -are grouped together into one or more segment files of up to 512MB each by default. When series are -deleted via the API, deletion records are stored in separate tombstone files (instead -of deleting the data immediately from the chunk segments). +are grouped together into one or more segment files of up to 512MB each by default. When +series are deleted via the API, deletion records are stored in separate tombstone files +(instead of deleting the data immediately from the chunk segments). The current block for incoming samples is kept in memory and is not fully persisted. It is secured against crashes by a write-ahead log (WAL) that can be @@ -58,15 +58,17 @@ A Prometheus server's data directory looks something like this:    └── 00000000 ``` - Note that a limitation of local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of drive or node outages and should be managed like any other single node -database. The use of RAID is suggested for storage availability, and [snapshots](querying/api.md#snapshot) -are recommended for backups. With proper +database. The use of RAID is suggested for storage availability, and +[snapshots](querying/api.md#snapshot) are recommended for backups. With proper architecture, it is possible to retain years of data in local storage. -Alternatively, external storage may be used via the [remote read/write APIs](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). Careful evaluation is required for these systems as they vary greatly in durability, performance, and efficiency. +Alternatively, external storage may be used via the +[remote read/write APIs](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). +Careful evaluation is required for these systems as they vary greatly in durability, +performance, and efficiency. For further details on file format, see [TSDB format](/tsdb/docs/format/README.md). @@ -74,40 +76,61 @@ For further details on file format, see [TSDB format](/tsdb/docs/format/README.m The initial two-hour blocks are eventually compacted into longer blocks in the background. -Compaction will create larger blocks containing data spanning up to 10% of the retention time, or 31 days, whichever is smaller. +Compaction will create larger blocks containing data spanning up to 10% of the retention time, +or 31 days, whichever is smaller. ## Operational aspects Prometheus has several flags that configure local storage. The most important are: -* `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -* `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. Overrides `storage.tsdb.retention` if this flag is set to anything other than default. -* `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only the persistent blocks are deleted to honor this retention although WAL and m-mapped chunks are counted in the total size. 
So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours). -* `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`. -* `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL. +- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. +- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. + Overrides `storage.tsdb.retention` if this flag is set to anything other than default. +- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. + The oldest data will be removed first. Defaults to `0` or disabled. Units supported: + B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only + the persistent blocks are deleted to honor this retention although WAL and m-mapped + chunks are counted in the total size. So the minimum requirement for the disk is the + peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` + (m-mapped Head chunks) directory combined (peaks every 2 hours). +- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`. +- `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). + Depending on your data, you can expect the WAL size to be halved with little extra + cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. + Note that once enabled, downgrading Prometheus to a version below 2.11.0 will + require deleting the WAL. -Prometheus stores an average of only 1-2 bytes per sample. Thus, to plan the capacity of a Prometheus server, you can use the rough formula: +Prometheus stores an average of only 1-2 bytes per sample. Thus, to plan the +capacity of a Prometheus server, you can use the rough formula: ``` needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes_per_sample ``` -To lower the rate of ingested samples, you can either reduce the number of time series you scrape (fewer targets or fewer series per target), or you can increase the scrape interval. However, reducing the number of series is likely more effective, due to compression of samples within a series. +To lower the rate of ingested samples, you can either reduce the number of +time series you scrape (fewer targets or fewer series per target), or you +can increase the scrape interval. However, reducing the number of series is +likely more effective, due to compression of samples within a series. If your local storage becomes corrupted for whatever reason, the best strategy to address the problem is to shut down Prometheus then remove the entire storage directory. You can also try removing individual block directories, -or the WAL directory to resolve the problem. Note that this means losing +or the WAL directory to resolve the problem. Note that this means losing approximately two hours data per block directory. Again, Prometheus's local storage is not intended to be durable long-term storage; external solutions offer extended retention and data durability. 
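To make the sizing formula above concrete, a back-of-the-envelope calculation with assumed inputs (15 days of retention at 100,000 samples/s and 2 bytes per sample works out to roughly 240 GiB):

```go
package main

import "fmt"

func main() {
	// Assumed inputs, purely illustrative.
	retentionSeconds := 15 * 24 * 3600 // 15d
	ingestedSamplesPerSecond := 100_000
	bytesPerSample := 2

	needed := retentionSeconds * ingestedSamplesPerSecond * bytesPerSample
	fmt.Printf("needed_disk_space ≈ %.0f GiB\n", float64(needed)/(1<<30))
}
```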
-CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus' local storage as unrecoverable corruptions may happen. NFS filesystems (including AWS's EFS) are not supported. NFS could be POSIX-compliant, but most implementations are not. It is strongly recommended to use a local filesystem for reliability.
+CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus'
+local storage as unrecoverable corruptions may happen. NFS filesystems
+(including AWS's EFS) are not supported. NFS could be POSIX-compliant,
+but most implementations are not. It is strongly recommended to use a
+local filesystem for reliability.

 If both time and size retention policies are specified, whichever triggers first
 will be used.

-Expired block cleanup happens in the background. It may take up to two hours to remove expired blocks. Blocks must be fully expired before they are removed.
+Expired block cleanup happens in the background. It may take up to two hours
+to remove expired blocks. Blocks must be fully expired before they are removed.

 ## Remote storage integrations

@@ -119,59 +142,101 @@ a set of interfaces that allow integrating with remote storage systems.

 Prometheus integrates with remote storage systems in three ways:

-* Prometheus can write samples that it ingests to a remote URL in a standardized format.
-* Prometheus can receive samples from other Prometheus servers in a standardized format.
-* Prometheus can read (back) sample data from a remote URL in a standardized format.
+- Prometheus can write samples that it ingests to a remote URL in a standardized format.
+- Prometheus can receive samples from other Prometheus servers in a standardized format.
+- Prometheus can read (back) sample data from a remote URL in a standardized format.

 ![Remote read and write architecture](images/remote_integrations.png)

-The read and write protocols both use a snappy-compressed protocol buffer encoding over HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC over HTTP/2 in the future, when all hops between Prometheus and the remote storage can safely be assumed to support HTTP/2.
+The read and write protocols both use a snappy-compressed protocol buffer encoding over
+HTTP. The protocols are not considered as stable APIs yet and may change to use gRPC
+over HTTP/2 in the future, when all hops between Prometheus and the remote storage can
+safely be assumed to support HTTP/2.

-For details on configuring remote storage integrations in Prometheus, see the [remote write](configuration/configuration.md#remote_write) and [remote read](configuration/configuration.md#remote_read) sections of the Prometheus configuration documentation.
+For details on configuring remote storage integrations in Prometheus, see the
+[remote write](configuration/configuration.md#remote_write) and
+[remote read](configuration/configuration.md#remote_read) sections of the Prometheus
+configuration documentation.

-The built-in remote write receiver can be enabled by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`.
+The built-in remote write receiver can be enabled by setting the
+`--web.enable-remote-write-receiver` command line flag. When enabled,
+the remote write receiver endpoint is `/api/v1/write`.

-For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).
+For details on the request and response messages, see the
+[remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).

-Note that on the read path, Prometheus only fetches raw series data for a set of label selectors and time ranges from the remote end. All PromQL evaluation on the raw data still happens in Prometheus itself. This means that remote read queries have some scalability limit, since all necessary data needs to be loaded into the querying Prometheus server first and then processed there. However, supporting fully distributed evaluation of PromQL was deemed infeasible for the time being.
+Note that on the read path, Prometheus only fetches raw series data for a set of
+label selectors and time ranges from the remote end. All PromQL evaluation on the
+raw data still happens in Prometheus itself. This means that remote read queries
+have some scalability limit, since all necessary data needs to be loaded into the
+querying Prometheus server first and then processed there. However, supporting
+fully distributed evaluation of PromQL was deemed infeasible for the time being.

 ### Existing integrations

-To learn more about existing integrations with remote storage systems, see the [Integrations documentation](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+To learn more about existing integrations with remote storage systems, see the
+[Integrations documentation](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).

 ## Backfilling from OpenMetrics format

 ### Overview

-If a user wants to create blocks into the TSDB from data that is in [OpenMetrics](https://openmetrics.io/) format, they can do so using backfilling. However, they should be careful and note that it is not safe to backfill data from the last 3 hours (the current head block) as this time range may overlap with the current head block Prometheus is still mutating. Backfilling will create new TSDB blocks, each containing two hours of metrics data. This limits the memory requirements of block creation. Compacting the two hour blocks into larger blocks is later done by the Prometheus server itself.
+If a user wants to create blocks into the TSDB from data that is in
+[OpenMetrics](https://openmetrics.io/) format, they can do so using backfilling.
+However, they should be careful and note that it is not safe to backfill data
+from the last 3 hours (the current head block) as this time range may overlap
+with the current head block Prometheus is still mutating. Backfilling will
+create new TSDB blocks, each containing two hours of metrics data. This limits
+the memory requirements of block creation. Compacting the two hour blocks into
+larger blocks is later done by the Prometheus server itself.

-A typical use case is to migrate metrics data from a different monitoring system or time-series database to Prometheus. To do so, the user must first convert the source data into [OpenMetrics](https://openmetrics.io/) format, which is the input format for the backfilling as described below.
+A typical use case is to migrate metrics data from a different monitoring system
+or time-series database to Prometheus. To do so, the user must first convert the
+source data into [OpenMetrics](https://openmetrics.io/) format, which is the
+input format for the backfilling as described below.

 ### Usage

-Backfilling can be used via the Promtool command line. Promtool will write the blocks to a directory. By default this output directory is ./data/, you can change it by using the name of the desired output directory as an optional argument in the sub-command.
+Backfilling can be used via the Promtool command line. Promtool will write the blocks
+to a directory. By default this output directory is ./data/, you can change it by
+using the name of the desired output directory as an optional argument in the sub-command.

 ```
 promtool tsdb create-blocks-from openmetrics <input file> [<output directory>]
 ```

-After the creation of the blocks, move it to the data directory of Prometheus. If there is an overlap with the existing blocks in Prometheus, the flag `--storage.tsdb.allow-overlapping-blocks` needs to be set for Prometheus versions v2.38 and below. Note that any backfilled data is subject to the retention configured for your Prometheus server (by time or size).
+After the creation of the blocks, move it to the data directory of Prometheus.
+If there is an overlap with the existing blocks in Prometheus, the flag
+`--storage.tsdb.allow-overlapping-blocks` needs to be set for Prometheus versions
+v2.38 and below. Note that any backfilled data is subject to the retention
+configured for your Prometheus server (by time or size).

 #### Longer Block Durations

-By default, the promtool will use the default block duration (2h) for the blocks; this behavior is the most generally applicable and correct. However, when backfilling data over a long range of times, it may be advantageous to use a larger value for the block duration to backfill faster and prevent additional compactions by TSDB later.
+By default, the promtool will use the default block duration (2h) for the blocks;
+this behavior is the most generally applicable and correct. However, when backfilling
+data over a long range of times, it may be advantageous to use a larger value for
+the block duration to backfill faster and prevent additional compactions by TSDB later.

-The `--max-block-duration` flag allows the user to configure a maximum duration of blocks. The backfilling tool will pick a suitable block duration no larger than this.
+The `--max-block-duration` flag allows the user to configure a maximum duration of blocks.
+The backfilling tool will pick a suitable block duration no larger than this.

-While larger blocks may improve the performance of backfilling large datasets, drawbacks exist as well. Time-based retention policies must keep the entire block around if even one sample of the (potentially large) block is still within the retention policy. Conversely, size-based retention policies will remove the entire block even if the TSDB only goes over the size limit in a minor way.
+While larger blocks may improve the performance of backfilling large datasets,
+drawbacks exist as well. Time-based retention policies must keep the entire block
+around if even one sample of the (potentially large) block is still within the
+retention policy. Conversely, size-based retention policies will remove the entire
+block even if the TSDB only goes over the size limit in a minor way.

-Therefore, backfilling with few blocks, thereby choosing a larger block duration, must be done with care and is not recommended for any production instances.
+Therefore, backfilling with few blocks, thereby choosing a larger block duration,
+must be done with care and is not recommended for any production instances.

 ## Backfilling for Recording Rules

 ### Overview

-When a new recording rule is created, there is no historical data for it. Recording rule data only exists from the creation time on. `promtool` makes it possible to create historical recording rule data.
+When a new recording rule is created, there is no historical data for it.
+Recording rule data only exists from the creation time on.
+`promtool` makes it possible to create historical recording rule data.

 ### Usage

@@ -187,14 +252,26 @@ $ promtool tsdb create-blocks-from rules \
     rules.yaml rules2.yaml
 ```

-The recording rule files provided should be a normal [Prometheus rules file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).
+The recording rule files provided should be a normal
+[Prometheus rules file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).

-The output of `promtool tsdb create-blocks-from rules` command is a directory that contains blocks with the historical rule data for all rules in the recording rule files. By default, the output directory is `data/`. In order to make use of this new block data, the blocks must be moved to a running Prometheus instance data dir `storage.tsdb.path` (for Prometheus versions v2.38 and below, the flag `--storage.tsdb.allow-overlapping-blocks` must be enabled). Once moved, the new blocks will merge with existing blocks when the next compaction runs.
+The output of `promtool tsdb create-blocks-from rules` command is a directory that
+contains blocks with the historical rule data for all rules in the recording rule
+files. By default, the output directory is `data/`. In order to make use of this
+new block data, the blocks must be moved to a running Prometheus instance data dir
+`storage.tsdb.path` (for Prometheus versions v2.38 and below, the flag
+`--storage.tsdb.allow-overlapping-blocks` must be enabled). Once moved, the new
+blocks will merge with existing blocks when the next compaction runs.

 ### Limitations

-- If you run the rule backfiller multiple times with the overlapping start/end times, blocks containing the same data will be created each time the rule backfiller is run.
+- If you run the rule backfiller multiple times with the overlapping start/end times,
+  blocks containing the same data will be created each time the rule backfiller is run.
 - All rules in the recording rule files will be evaluated.
-- If the `interval` is set in the recording rule file that will take priority over the `eval-interval` flag in the rule backfill command.
+- If the `interval` is set in the recording rule file that will take priority over
+  the `eval-interval` flag in the rule backfill command.
 - Alerts are currently ignored if they are in the recording rule file.
-- Rules in the same group cannot see the results of previous rules. Meaning that rules that refer to other rules being backfilled is not supported. A workaround is to backfill multiple times and create the dependent data first (and move dependent data to the Prometheus server data dir so that it is accessible from the Prometheus API).
+- Rules in the same group cannot see the results of previous rules. Meaning that rules
+  that refer to other rules being backfilled is not supported. A workaround is to
+  backfill multiple times and create the dependent data first (and move dependent
+  data to the Prometheus server data dir so that it is accessible from the Prometheus API).
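The patch above documents the remote-write receiver (`--web.enable-remote-write-receiver`, endpoint `/api/v1/write`) and describes the wire format as snappy-compressed protocol buffers over HTTP. A minimal client sketch in Go follows; it assumes a local Prometheus started with that flag, the URL, metric name, and label values are placeholders, and it uses the `prompb`, `snappy`, and gogo `proto` packages that already appear in this repository's dependencies.

```
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// One series with a single sample; remote-write timestamps are milliseconds.
	req := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "example_metric"}, // placeholder metric name
				{Name: "job", Value: "remote-write-demo"},   // placeholder label
			},
			Samples: []prompb.Sample{
				{Value: 42, Timestamp: time.Now().UnixMilli()},
			},
		}},
	}

	// The protocol described above: protobuf-encoded, then snappy-compressed.
	raw, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	compressed := snappy.Encode(nil, raw)

	httpReq, err := http.NewRequest(http.MethodPost,
		"http://localhost:9090/api/v1/write", // assumes --web.enable-remote-write-receiver
		bytes.NewReader(compressed))
	if err != nil {
		panic(err)
	}
	httpReq.Header.Set("Content-Encoding", "snappy")
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("remote write status:", resp.Status)
}
```

Any 2xx status indicates the samples were accepted; the request and response messages themselves are defined in the protocol buffer definitions linked above.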
diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go
index ae656db193..f4bba73944 100644
--- a/documentation/examples/custom-sd/adapter-usage/main.go
+++ b/documentation/examples/custom-sd/adapter-usage/main.go
@@ -268,6 +268,11 @@ func main() {
 	if err != nil {
 		fmt.Println("err: ", err)
 	}
+
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err)
+		os.Exit(1)
+	}

 	sdAdapter := adapter.NewAdapter(ctx, *outputFile, "exampleSD", disc, logger)
 	sdAdapter.Run()
diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go
index 57c32ce492..7fbf94aa9c 100644
--- a/documentation/examples/custom-sd/adapter/adapter.go
+++ b/documentation/examples/custom-sd/adapter/adapter.go
@@ -25,6 +25,7 @@ import (

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/discovery"
@@ -167,7 +168,7 @@ func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer,
 		ctx:     ctx,
 		disc:    d,
 		groups:  make(map[string]*customSD),
-		manager: discovery.NewManager(ctx, logger),
+		manager: discovery.NewManager(ctx, logger, prometheus.NewRegistry()),
 		output:  file,
 		name:    name,
 		logger:  logger,
diff --git a/documentation/examples/custom-sd/adapter/adapter_test.go b/documentation/examples/custom-sd/adapter/adapter_test.go
index eaf34c6673..8e5920eb48 100644
--- a/documentation/examples/custom-sd/adapter/adapter_test.go
+++ b/documentation/examples/custom-sd/adapter/adapter_test.go
@@ -226,6 +226,7 @@ func TestWriteOutput(t *testing.T) {
 	require.NoError(t, err)
 	defer os.Remove(tmpfile.Name())
 	tmpfile.Close()
+	require.NoError(t, err)
 	adapter := NewAdapter(ctx, tmpfile.Name(), "test_sd", nil, nil)
 	require.NoError(t, adapter.writeOutput())
 }
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index 1014b0aadf..3a84bfa98f 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,26 +1,26 @@
 module github.com/prometheus/prometheus/documentation/examples/remote_storage

-go 1.18
+go 1.20

 require (
-	github.com/alecthomas/kingpin/v2 v2.3.2
+	github.com/alecthomas/kingpin/v2 v2.4.0
 	github.com/go-kit/log v0.2.1
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.11.2
-	github.com/prometheus/client_golang v1.17.0
+	github.com/prometheus/client_golang v1.18.0
 	github.com/prometheus/common v0.45.0
-	github.com/prometheus/prometheus v0.47.2
+	github.com/prometheus/prometheus v0.48.0
 	github.com/stretchr/testify v1.8.4
 )

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.302 // indirect
+	github.com/aws/aws-sdk-go v1.45.25 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -29,14 +29,14 @@ require ( github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect @@ -46,28 +46,28 @@ require ( github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect + github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect - go.opentelemetry.io/collector/semconv v0.81.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 // indirect + go.opentelemetry.io/collector/semconv v0.87.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 64ced7423c..031c54ff03 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,23 +1,17 @@ -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible 
h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -27,8 +21,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= -github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= +github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -44,10 +38,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= @@ -82,8 +76,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -99,23 +93,23 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= +github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -125,13 +119,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= +github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c h1:Nc3Mt2BAnq0/VoLEntF/nipX+K1S7pG+RgwiitSv6v0= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.4.0 h1:MqlAE+w125PLvJRCpAJmEwrIxoVdUdOyuFUhE/Ukbok= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= -github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= 
+github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -148,8 +142,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -162,14 +156,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= +github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -186,7 +180,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -200,13 +194,13 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -219,12 +213,12 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.47.2 h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI= -github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.48.0 h1:yrBloImGQ7je4h8M10ujGh4R6oxYQJQKlMuETwNskGk= +github.com/prometheus/prometheus v0.48.0/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 h1:yWfiTPwYxB0l5fGMhl/G+liULugVIHD9AU77iNLrURQ= github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -244,18 +238,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= -go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= -go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= +go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= +go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= @@ -268,12 +262,12 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod 
h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -294,8 +288,8 @@ golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -303,7 +297,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -323,8 +317,8 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -344,7 +338,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -353,12 +347,12 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= -google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -386,12 +380,12 @@ 
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= -k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= -k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/go.mod b/go.mod index e81939d834..dc8739daca 100644 --- a/go.mod +++ b/go.mod @@ -3,53 +3,52 @@ module github.com/prometheus/prometheus go 1.20 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 - github.com/DmitriyVTitov/size v1.5.0 - github.com/alecthomas/kingpin/v2 v2.3.2 - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.45.25 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 + github.com/Code-Hex/go-generics-cache v1.3.1 + github.com/alecthomas/kingpin/v2 v2.4.0 + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 + github.com/aws/aws-sdk-go v1.48.14 + github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 - github.com/dgraph-io/ristretto v0.1.1 - github.com/digitalocean/godo v1.104.1 - github.com/docker/docker v24.0.6+incompatible + github.com/digitalocean/godo v1.106.0 + github.com/docker/docker v24.0.7+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.1 github.com/envoyproxy/protoc-gen-validate v1.0.2 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.9 github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 - github.com/google/uuid v1.3.1 - github.com/gophercloud/gophercloud v1.7.0 + github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 + github.com/google/uuid v1.4.0 + github.com/gophercloud/gophercloud v1.8.0 github.com/grafana/regexp 
v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.25.1 - github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c + github.com/hashicorp/consul/api v1.26.1 + github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c // Not upgrading this for now due to https://github.com/prometheus/prometheus/pull/13255#issuecomment-1845237409 github.com/hetznercloud/hcloud-go/v2 v2.4.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.9 + github.com/ionos-cloud/sdk-go/v6 v6.1.10 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.2 + github.com/klauspost/compress v1.17.4 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.24.0 - github.com/miekg/dns v1.1.56 + github.com/linode/linodego v1.25.0 + github.com/miekg/dns v1.1.57 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.4.3 - github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.26.0 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.45.0 + github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.10.0 @@ -57,80 +56,70 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/featuregate v0.77.0 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 - go.opentelemetry.io/collector/semconv v0.88.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 - go.opentelemetry.io/otel v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 - go.opentelemetry.io/otel/sdk v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/collector/featuregate v1.0.0 + go.opentelemetry.io/collector/pdata v1.0.0 + go.opentelemetry.io/collector/semconv v0.90.1 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 + go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 + go.opentelemetry.io/otel/sdk v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 - go.uber.org/goleak v1.2.1 + go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.18.0 - golang.org/x/oauth2 v0.13.0 + golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb + golang.org/x/net v0.19.0 + golang.org/x/oauth2 v0.15.0 golang.org/x/sync v0.5.0 - golang.org/x/sys v0.14.0 - golang.org/x/time v0.3.0 - golang.org/x/tools v0.15.0 - google.golang.org/api v0.147.0 - google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a + golang.org/x/sys v0.15.0 + golang.org/x/time v0.5.0 + golang.org/x/tools v0.16.0 + google.golang.org/api v0.153.0 + google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 
google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 + k8s.io/api v0.28.4 + k8s.io/apimachinery v0.28.4 + k8s.io/client-go v0.28.4 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.100.1 + k8s.io/klog/v2 v2.110.1 ) require ( - cloud.google.com/go/compute v1.23.0 // indirect + github.com/DmitriyVTitov/size v1.5.0 + github.com/dgraph-io/ristretto v0.1.1 + github.com/pkg/errors v0.9.1 +) + +require ( + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect -) - -require ( - github.com/Code-Hex/go-generics-cache v1.3.1 github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.4 // indirect @@ -141,22 +130,28 @@ require ( github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-resty/resty/v2 v2.10.0 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // 
indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -164,6 +159,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect @@ -176,18 +172,23 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.mongodb.org/mongo-driver v1.12.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/crypto v0.15.0 // indirect + golang.org/x/crypto v0.16.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/term v0.14.0 // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index e0136526b2..18b5e3ce06 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 
h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -34,20 +34,18 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1/go.mod h1:Bzf34hhAE9NSxailk8xVeLEZbUjOXcC+GnU1mMKdhLw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0 h1:qgs/VAMSR+9qFhwTw4OwF2NbVuw+2m83pVZJjqkKQMw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 
v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= @@ -68,15 +66,15 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -93,9 +91,11 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= -github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.48.14 h1:nVLrp+F84SG+xGiFMfe1TE6ZV6smF+42tuuNgYGV30s= +github.com/aws/aws-sdk-go v1.48.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 
h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -141,13 +141,13 @@ github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkz github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= -github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= +github.com/digitalocean/godo v1.106.0 h1:m5iErwl3xHovGFlawd50n54ntgXHt1BLsvU6BXsVxEU= +github.com/digitalocean/godo v1.106.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= -github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -179,13 +179,13 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -205,8 +205,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -236,8 +236,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= +github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -356,24 +356,24 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 h1:PxlBVtIFHR/mtWk2i0gTEdCz+jBnqiuHNSki0epDbVs= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= -github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -391,10 +391,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM= +github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -430,6 +430,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -455,8 +456,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= -github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.10 h1:3815Q2Hw/wc4cJ8wD7bwfsmDsdfIEp80B7BQMj0YP2w= +github.com/ionos-cloud/sdk-go/v6 v6.1.10/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -489,8 +490,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -508,8 +509,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.24.0 h1:zO+bMdTE6wPccqP7QIkbxAfACX7DjSX6DW9JE/qOKDQ= -github.com/linode/linodego v1.24.0/go.mod h1:cq/ty5BCEQnsO6OjMqD7Q03KCCyB8CNM5E3MNg0LV6M= +github.com/linode/linodego v1.25.0 h1:zYMz0lTasD503jBu3tSRhzEmXHQN1zptCw5o71ibyyU= +github.com/linode/linodego v1.25.0/go.mod h1:BMZI0pMM/YGjBis7pIXDPbcgYfCZLH0/UvzqtsGtG1c= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -542,8 +543,8 @@ github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -643,8 +644,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -660,8 +661,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99 h1:V5ajRiLiCQGO+ggTr+07gMUcTqlIMMkDBfrJe5zKLmc= +github.com/prometheus/common v0.45.1-0.20231122191551-832cd6e99f99/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -674,8 +675,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -759,8 +760,8 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= -go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -770,28 +771,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v0.77.0 h1:m1/IzaXoQh6SgF6CM80vrBOCf5zSJ2GVISfA27fYzGU= -go.opentelemetry.io/collector/featuregate v0.77.0/go.mod h1:/kVAsGUCyJXIDSgHftCN63QiwAEVHRLX2Kh/S+dqgHY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= -go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= -go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 
h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= +go.opentelemetry.io/collector/featuregate v1.0.0/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= +go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= +go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= +go.opentelemetry.io/collector/semconv v0.90.1 h1:2fkQZbefQBbIcNb9Rk1mRcWlFZgQOk7CpST1e1BK8eg= +go.opentelemetry.io/collector/semconv v0.90.1/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -800,8 +801,8 @@ go.uber.org/atomic v1.11.0 
h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -824,8 +825,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -836,8 +837,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -904,20 +905,19 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -997,23 +997,20 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1023,7 +1020,6 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -1033,8 +1029,9 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1089,8 +1086,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= -golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1110,8 +1107,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= -google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= +google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= +google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1150,12 +1147,12 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1235,12 +1232,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= 
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= +k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= +k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index 286913628b..64c7b797a1 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -94,8 +94,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { Sum: h.Sum, } - c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false) - c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false) + c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, false) + c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, false) return &c } @@ -231,11 +231,8 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // resulting histogram might have buckets with a population of zero or directly // adjacent spans (offset=0). To normalize those, call the Compact method. // -// The method reconciles differences in the zero threshold and in the schema, -// but the schema of the other histogram must be ≥ the schema of the receiving -// histogram (i.e. must have an equal or higher resolution). This means that the -// schema of the receiving histogram won't change. Its zero threshold, however, -// will change if needed. The other histogram will not be modified in any case. +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. // // This method returns a pointer to the receiving histogram for convenience. 
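The doc-comment rewrite above relaxes Add's old contract: instead of panicking when the other histogram has a lower (coarser) schema, the receiving histogram is now itself reduced to that coarser schema before the buckets are merged. A minimal sketch of the new behavior, assuming the exported model/histogram package (the field values here are illustrative, not taken from the patch):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
    	a := &histogram.FloatHistogram{
    		Schema: 1, Count: 3, Sum: 5,
    		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 2}},
    		PositiveBuckets: []float64{2, 1},
    	}
    	b := &histogram.FloatHistogram{
    		Schema: 0, Count: 1, Sum: 2,
    		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
    		PositiveBuckets: []float64{1},
    	}
    	sum := a.Add(b)         // a's buckets are first reduced to schema 0 in place.
    	fmt.Println(sum.Schema) // 0; sum and a are the same pointer.
    }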
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { @@ -269,21 +266,30 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Sum += other.Sum var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) - if other.Schema < h.Schema { - panic(fmt.Errorf("cannot add histogram with schema %d to %d", other.Schema, h.Schema)) - } else if other.Schema > h.Schema { - otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false) - otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false) + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -296,21 +302,29 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Sum -= other.Sum var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) - if other.Schema < h.Schema { - panic(fmt.Errorf("cannot subtract histigram with schema %d to %d", other.Schema, h.Schema)) - } else if other.Schema > h.Schema { - otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false) - otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false) + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, 
otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -353,12 +367,12 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { // Size returns the total size of the FloatHistogram, which includes the size of the pointer // to FloatHistogram, all its fields, and all elements contained in slices. // NOTE: this is only valid for 64 bit architectures. -func (fh *FloatHistogram) Size() int { +func (h *FloatHistogram) Size() int { // Size of each slice separately. - posSpanSize := len(fh.PositiveSpans) * 8 // 8 bytes (int32 + uint32). - negSpanSize := len(fh.NegativeSpans) * 8 // 8 bytes (int32 + uint32). - posBucketSize := len(fh.PositiveBuckets) * 8 // 8 bytes (float64). - negBucketSize := len(fh.NegativeBuckets) * 8 // 8 bytes (float64). + posSpanSize := len(h.PositiveSpans) * 8 // 8 bytes (int32 + uint32). + negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). + posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). + negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). // Total size of the struct. @@ -765,8 +779,9 @@ func (h *FloatHistogram) floatBucketIterator( schema: h.Schema, positive: positive, }, - targetSchema: targetSchema, - absoluteStartValue: absoluteStartValue, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + boundReachedStartValue: absoluteStartValue == 0, } if positive { i.spans = h.PositiveSpans @@ -824,55 +839,83 @@ func (i *floatBucketIterator) Next() bool { return false } - // Copy all of these into local variables so that we can forward to the - // next bucket and then roll back if needed. - origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan - span := i.spans[spansIdx] - firstPass := true - i.currCount = 0 - -mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. - for { + if i.schema == i.targetSchema { + // Fast path for the common case. + span := i.spans[i.spansIdx] if i.bucketsIdx == 0 { // Seed origIdx for the first bucket. - origIdx = span.Offset + i.currIdx = span.Offset } else { - origIdx++ + i.currIdx++ } - for idxInSpan >= span.Length { + + for i.idxInSpan >= span.Length { // We have exhausted the current span and have to find a new // one. We even handle pathologic spans of length 0 here. - idxInSpan = 0 - spansIdx++ - if spansIdx >= len(i.spans) { - if firstPass { - return false + i.idxInSpan = 0 + i.spansIdx++ + if i.spansIdx >= len(i.spans) { + return false + } + span = i.spans[i.spansIdx] + i.currIdx += span.Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.idxInSpan++ + i.bucketsIdx++ + } else { + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. 
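The merge loop that follows collapses runs of fine-grained buckets into single coarse buckets; the index mapping it relies on is the one computed by the targetIdx helper (the method version, removed further down, shows the same formula). A worked sketch of that mapping; the standalone signature mirrors the call site in the patch, while the example indices are editorial:

    // targetIdx maps a bucket index at originSchema to the index of the
    // enclosing bucket at the coarser targetSchema (originSchema > targetSchema).
    // It relies on Go's arithmetic right shift for negative indices.
    func targetIdx(idx, originSchema, targetSchema int32) int32 {
    	return ((idx - 1) >> (originSchema - targetSchema)) + 1
    }

    // With originSchema=2 and targetSchema=0, each coarse bucket covers
    // 2^(2-0) = 4 fine buckets:
    //   targetIdx(1..4, 2, 0)  == 1  (coarse bucket with upper bound 2^1)
    //   targetIdx(5..8, 2, 0)  == 2  (upper bound 2^2)
    //   targetIdx(-3..0, 2, 0) == 0  (upper bound 2^0)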
+ origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + + mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. break mergeLoop } - span = i.spans[spansIdx] - origIdx += span.Offset - } - currIdx := i.targetIdx(origIdx) - switch { - case firstPass: - i.currIdx = currIdx - firstPass = false - case currIdx != i.currIdx: - // Reached next bucket in targetSchema. - // Do not actually forward to the next bucket, but break out. - break mergeLoop - } - i.currCount += i.buckets[i.bucketsIdx] - idxInSpan++ - i.bucketsIdx++ - i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan - if i.schema == i.targetSchema { - // Don't need to test the next bucket for mergeability - // if we have no schema change anyway. - break mergeLoop } } + // Skip buckets before absoluteStartValue. // TODO(beorn7): Maybe do something more efficient than this recursive call. if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { @@ -882,17 +925,6 @@ mergeLoop: // Merge together all buckets from the original schema that fall into return true } -// targetIdx returns the bucket index within i.targetSchema for the given bucket -// index within i.schema. -func (i *floatBucketIterator) targetIdx(idx int32) int32 { - if i.schema == i.targetSchema { - // Fast path for the common case. The below would yield the same - // result, just with more effort. - return idx - } - return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 -} - type reverseFloatBucketIterator struct { baseBucketIterator[float64, float64] idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. @@ -1000,8 +1032,8 @@ func addBuckets( spansB []Span, bucketsB []float64, ) ([]Span, []float64) { var ( - iSpan int = -1 - iBucket int = -1 + iSpan = -1 + iBucket = -1 iInSpan int32 indexA int32 indexB int32 @@ -1034,17 +1066,16 @@ func addBuckets( spansA[0].Length++ spansA[0].Offset-- goto nextLoop - } else { - spansA = append(spansA, Span{}) - copy(spansA[1:], spansA) - spansA[0] = Span{Offset: indexB, Length: 1} - if len(spansA) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. 
- spansA[1].Offset -= indexB + 1 - } - goto nextLoop } + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop } else if spansA[0].Offset == indexB { // Just add to first bucket. bucketsA[0] += bucketB @@ -1062,48 +1093,47 @@ func addBuckets( iInSpan += deltaIndex bucketsA[iBucket] += bucketB break - } else { - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). - bucketsA = append(bucketsA, 0) - copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) - bucketsA[iBucket] = bucketB - switch { - case deltaIndex == 0: - // Directly after previous span, extend previous span. - if iSpan < len(spansA) { - spansA[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spansA[iSpan].Length) - spansA[iSpan].Length++ - goto nextLoop - case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: - // Directly before next span, extend next span. - iInSpan = 0 + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { spansA[iSpan].Offset-- - spansA[iSpan].Length++ - goto nextLoop - default: - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spansA) { - spansA[iSpan].Offset -= deltaIndex + 1 - } - spansA = append(spansA, Span{}) - copy(spansA[iSpan+1:], spansA[iSpan:]) - spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} - goto nextLoop } - } else { - // Try start of next span. - deltaIndex -= spansA[iSpan].Offset + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop } + } else { + // Try start of next span. 
+ deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 } } @@ -1136,8 +1166,9 @@ func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) } - h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false) - h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false) + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, true) + h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, true) + h.Schema = targetSchema return h } diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index cbcd2a1f08..3d20960f6c 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -1242,7 +1242,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{0, 2}, {3, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeSpans: []Span{{-6, 2}, {1, 2}}, NegativeBuckets: []float64{1, 1, 4, 4}, }, &FloatHistogram{ @@ -1262,7 +1262,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{-2, 2}, {0, 5}, {0, 3}}, PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + NegativeSpans: []Span{{-6, 2}, {1, 2}, {4, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, }, @@ -1573,16 +1573,33 @@ func TestFloatHistogramAdd(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - in2Copy := c.in2.Copy() - require.Equal(t, c.expected, c.in1.Add(c.in2)) - // Has it also happened in-place? - require.Equal(t, c.expected, c.in1) - // Check that the argument was not mutated. - require.Equal(t, in2Copy, c.in2) + testHistogramAdd(t, c.in1, c.in2, c.expected) + testHistogramAdd(t, c.in2, c.in1, c.expected) }) } } +func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram) { + var ( + aCopy = a.Copy() + bCopy = b.Copy() + expectedCopy = expected.Copy() + ) + + res := aCopy.Add(bCopy) + + res.Compact(0) + expectedCopy.Compact(0) + + require.Equal(t, expectedCopy, res) + + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) +} + func TestFloatHistogramSub(t *testing.T) { // This has fewer test cases than TestFloatHistogramAdd because Add and // Sub share most of the trickier code. @@ -1662,16 +1679,35 @@ func TestFloatHistogramSub(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - in2Copy := c.in2.Copy() - require.Equal(t, c.expected, c.in1.Sub(c.in2)) - // Has it also happened in-place? - require.Equal(t, c.expected, c.in1) - // Check that the argument was not mutated. - require.Equal(t, in2Copy, c.in2) + testFloatHistogramSub(t, c.in1, c.in2, c.expected) + + expectedNegative := c.expected.Copy().Mul(-1) + testFloatHistogramSub(t, c.in2, c.in1, expectedNegative) }) } } +func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram) { + var ( + aCopy = a.Copy() + bCopy = b.Copy() + expectedCopy = expected.Copy() + ) + + res := aCopy.Sub(bCopy) + + res.Compact(0) + expectedCopy.Compact(0) + + require.Equal(t, expectedCopy, res) + + // Has it also happened in-place? 
+ require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) +} + func TestFloatHistogramCopyToSchema(t *testing.T) { cases := []struct { name string @@ -1734,7 +1770,10 @@ func TestFloatHistogramCopyToSchema(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { + inCopy := c.in.Copy() require.Equal(t, c.expected, c.in.CopyToSchema(c.targetSchema)) + // Check that the receiver histogram was not mutated: + require.Equal(t, inCopy, c.in) }) } } @@ -1779,8 +1818,8 @@ func TestReverseFloatBucketIterator(t *testing.T) { for it.Next() { actBuckets = append(actBuckets, it.At()) } - require.Greater(t, len(expBuckets), 0) - require.Greater(t, len(actBuckets), 0) + require.NotEmpty(t, expBuckets) + require.NotEmpty(t, actBuckets) require.Equal(t, expBuckets, actBuckets) // Negative buckets. @@ -1795,8 +1834,8 @@ func TestReverseFloatBucketIterator(t *testing.T) { for it.Next() { actBuckets = append(actBuckets, it.At()) } - require.Greater(t, len(expBuckets), 0) - require.Greater(t, len(actBuckets), 0) + require.NotEmpty(t, expBuckets) + require.NotEmpty(t, actBuckets) require.Equal(t, expBuckets, actBuckets) } @@ -2489,5 +2528,7 @@ func TestFloatHistogramReduceResolution(t *testing.T) { for _, tc := range tcs { target := tc.origin.ReduceResolution(tc.target.Schema) require.Equal(t, tc.target, target) + // Check that receiver histogram was mutated: + require.Equal(t, tc.target, tc.origin) } } diff --git a/model/histogram/generic.go b/model/histogram/generic.go index d42bb24151..7e1cc4b605 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -605,7 +605,16 @@ var exponentialBounds = [][]float64{ // The target schema must be smaller than the original schema. // Set deltaBuckets to true if the provided buckets are // deltas. Set it to false if the buckets contain absolute counts. -func reduceResolution[IBC InternalBucketCount](originSpans []Span, originBuckets []IBC, originSchema, targetSchema int32, deltaBuckets bool) ([]Span, []IBC) { +// Set inplace to true to reuse input slices and avoid allocations (otherwise +// new slices will be allocated for result). +func reduceResolution[IBC InternalBucketCount]( + originSpans []Span, + originBuckets []IBC, + originSchema, + targetSchema int32, + deltaBuckets bool, + inplace bool, +) ([]Span, []IBC) { var ( targetSpans []Span // The spans in the target schema. targetBuckets []IBC // The bucket counts in the target schema. @@ -617,6 +626,13 @@ func reduceResolution[IBC InternalBucketCount](originSpans []Span, originBuckets lastTargetBucketCount IBC ) + if inplace { + // Slice reuse is safe because when reducing the resolution, + // target slices don't grow faster than origin slices are being read. + targetSpans = originSpans[:0] + targetBuckets = originBuckets[:0] + } + for _, span := range originSpans { // Determine the index of the first bucket in this span. 
bucketIdx += span.Offset diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go index d24910d214..b4d6585a8b 100644 --- a/model/histogram/generic_test.go +++ b/model/histogram/generic_test.go @@ -15,6 +15,7 @@ package histogram import ( "math" + "slices" "testing" "github.com/stretchr/testify/require" @@ -140,9 +141,22 @@ func TestReduceResolutionHistogram(t *testing.T) { } for _, tc := range cases { - spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, true) + spansCopy, bucketsCopy := slices.Clone(tc.spans), slices.Clone(tc.buckets) + spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, true, false) require.Equal(t, tc.expectedSpans, spans) require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were not mutated: + require.Equal(t, spansCopy, tc.spans) + require.Equal(t, bucketsCopy, tc.buckets) + + // Output slices reuse input slices: + const inplace = true + spans, buckets = reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, true, inplace) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were mutated which is now expected: + require.Equal(t, spans, tc.spans[:len(spans)]) + require.Equal(t, buckets, tc.buckets[:len(buckets)]) } } @@ -175,8 +189,21 @@ func TestReduceResolutionFloatHistogram(t *testing.T) { } for _, tc := range cases { - spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, false) + spansCopy, bucketsCopy := slices.Clone(tc.spans), slices.Clone(tc.buckets) + spans, buckets := reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, false, false) require.Equal(t, tc.expectedSpans, spans) require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were not mutated: + require.Equal(t, spansCopy, tc.spans) + require.Equal(t, bucketsCopy, tc.buckets) + + // Output slices reuse input slices: + const inplace = true + spans, buckets = reduceResolution(tc.spans, tc.buckets, tc.schema, tc.targetSchema, false, inplace) + require.Equal(t, tc.expectedSpans, spans) + require.Equal(t, tc.expectedBuckets, buckets) + // Verify inputs were mutated which is now expected: + require.Equal(t, spans, tc.spans[:len(spans)]) + require.Equal(t, buckets, tc.buckets[:len(buckets)]) } } diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index 4a12498f22..f4d292b344 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -282,50 +282,49 @@ func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { return h } -// ToFloat returns a FloatHistogram representation of the Histogram. It is a -// deep copy (e.g. spans are not shared). -func (h *Histogram) ToFloat() *FloatHistogram { - var ( - positiveSpans, negativeSpans []Span - positiveBuckets, negativeBuckets []float64 - ) - if len(h.PositiveSpans) != 0 { - positiveSpans = make([]Span, len(h.PositiveSpans)) - copy(positiveSpans, h.PositiveSpans) +// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep +// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an +// argument whose memory will be reused and overwritten if provided. If this +// argument is nil, a new FloatHistogram will be allocated. 
+func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { + if fh == nil { + fh = &FloatHistogram{} } - if len(h.NegativeSpans) != 0 { - negativeSpans = make([]Span, len(h.NegativeSpans)) - copy(negativeSpans, h.NegativeSpans) - } - if len(h.PositiveBuckets) != 0 { - positiveBuckets = make([]float64, len(h.PositiveBuckets)) - var current float64 - for i, b := range h.PositiveBuckets { - current += float64(b) - positiveBuckets[i] = current - } - } - if len(h.NegativeBuckets) != 0 { - negativeBuckets = make([]float64, len(h.NegativeBuckets)) - var current float64 - for i, b := range h.NegativeBuckets { - current += float64(b) - negativeBuckets[i] = current - } + fh.CounterResetHint = h.CounterResetHint + fh.Schema = h.Schema + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + fh.Count = float64(h.Count) + fh.Sum = h.Sum + + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) + copy(fh.PositiveSpans, h.PositiveSpans) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) + var currentPositive float64 + for i, b := range h.PositiveBuckets { + currentPositive += float64(b) + fh.PositiveBuckets[i] = currentPositive } - return &FloatHistogram{ - CounterResetHint: h.CounterResetHint, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: float64(h.ZeroCount), - Count: float64(h.Count), - Sum: h.Sum, - PositiveSpans: positiveSpans, - NegativeSpans: negativeSpans, - PositiveBuckets: positiveBuckets, - NegativeBuckets: negativeBuckets, + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative } + + return fh +} + +func resize[T any](items []T, n int) []T { + if cap(items) < n { + return make([]T, n) + } + return items[:n] } // Validate validates consistency between span and bucket slices. 
Also, buckets are checked @@ -502,10 +501,10 @@ func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { } h.PositiveSpans, h.PositiveBuckets = reduceResolution( - h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, + h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, true, ) h.NegativeSpans, h.NegativeBuckets = reduceResolution( - h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, + h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, true, ) h.Schema = targetSchema return h diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 5aa9ca6feb..9a64faaaae 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -408,9 +408,57 @@ func TestHistogramToFloat(t *testing.T) { }, NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, } - fh := h.ToFloat() + cases := []struct { + name string + fh *FloatHistogram + }{ + {name: "without prior float histogram"}, + {name: "prior float histogram with more buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }}, + {name: "prior float histogram with fewer buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2}, + }}, + } - require.Equal(t, h.String(), fh.String()) + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fh := h.ToFloat(c.fh) + require.Equal(t, h.String(), fh.String()) + }) + } } // TestHistogramEquals tests both Histogram and FloatHistogram. 
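The new ToFloat signature above is designed for allocation reuse: passing nil allocates a fresh FloatHistogram, while passing a previous result lets the resize helper overwrite its spans and buckets in place. A minimal sketch of a caller threading one scratch histogram through repeated conversions (the sample data and loop are illustrative only, not part of this change):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	hs := []*histogram.Histogram{
		{Schema: 0, Count: 2, Sum: 5, PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, PositiveBuckets: []int64{1, 0}},
		{Schema: 0, Count: 4, Sum: 9, PositiveSpans: []histogram.Span{{Offset: 1, Length: 2}}, PositiveBuckets: []int64{2, 0}},
	}
	var scratch *histogram.FloatHistogram
	for _, h := range hs {
		// nil on the first call allocates; later calls reuse scratch's
		// span and bucket slices instead of reallocating them.
		scratch = h.ToFloat(scratch)
		fmt.Println(scratch.String())
	}
}

This mirrors what the new test cases check: a prior FloatHistogram with more or fewer buckets than needed is overwritten correctly either way.
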
@@ -436,14 +484,14 @@ func TestHistogramEquals(t *testing.T) { equals := func(h1, h2 Histogram) { require.True(t, h1.Equals(&h2)) require.True(t, h2.Equals(&h1)) - h1f, h2f := h1.ToFloat(), h2.ToFloat() + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) require.True(t, h1f.Equals(h2f)) require.True(t, h2f.Equals(h1f)) } notEquals := func(h1, h2 Histogram) { require.False(t, h1.Equals(&h2)) require.False(t, h2.Equals(&h1)) - h1f, h2f := h1.ToFloat(), h2.ToFloat() + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) require.False(t, h1f.Equals(h2f)) require.False(t, h2f.Equals(h1f)) } @@ -950,7 +998,7 @@ func TestHistogramValidation(t *testing.T) { return } - fh := tc.h.ToFloat() + fh := tc.h.ToFloat(nil) if err := fh.Validate(); tc.errMsg != "" { require.EqualError(t, err, tc.errMsg) } else { @@ -1008,5 +1056,7 @@ func TestHistogramReduceResolution(t *testing.T) { for _, tc := range tcs { target := tc.origin.ReduceResolution(tc.target.Schema) require.Equal(t, tc.target, target) + // Check that receiver histogram was mutated: + require.Equal(t, tc.target, tc.origin) } } diff --git a/model/metadata/metadata.go b/model/metadata/metadata.go index f227af0b95..f6f2827a46 100644 --- a/model/metadata/metadata.go +++ b/model/metadata/metadata.go @@ -13,11 +13,11 @@ package metadata -import "github.com/prometheus/prometheus/model/textparse" +import "github.com/prometheus/common/model" // Metadata stores a series' metadata information. type Metadata struct { - Type textparse.MetricType + Type model.MetricType Unit string Help string } diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index fadf35b867..d29c3d07ae 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -108,6 +108,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Regex.Regexp == nil { c.Regex = MustNewRegexp("") } + return c.Validate() +} + +func (c *Config) Validate() error { if c.Action == "" { return fmt.Errorf("relabel action cannot be empty") } @@ -117,7 +121,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) { + if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { @@ -264,12 +274,11 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { } target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes)) if !target.IsValid() { - 
lb.Del(cfg.TargetLabel) break } res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes) if len(res) == 0 { - lb.Del(cfg.TargetLabel) + lb.Del(string(target)) break } lb.Set(string(target), string(res)) diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index b50ff4010a..517b9b8223 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -14,6 +14,7 @@ package relabel import ( + "fmt" "testing" "github.com/prometheus/common/model" @@ -213,6 +214,25 @@ func TestRelabel(t *testing.T) { "a": "boo", }), }, + { + // Blank replacement should delete the label. + input: labels.FromMap(map[string]string{ + "a": "foo", + "f": "baz", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("(f).*"), + TargetLabel: "$1", + Replacement: "$2", + Action: Replace, + }, + }, + output: labels.FromMap(map[string]string{ + "a": "foo", + }), + }, { input: labels.FromMap(map[string]string{ "a": "foo", @@ -334,7 +354,7 @@ func TestRelabel(t *testing.T) { }, { // invalid target_labels input: labels.FromMap(map[string]string{ - "a": "some-name-value", + "a": "some-name-0", }), relabel: []*Config{ { @@ -349,18 +369,18 @@ func TestRelabel(t *testing.T) { Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), Action: Replace, Replacement: "${1}", - TargetLabel: "0${3}", + TargetLabel: "${3}", }, { SourceLabels: model.LabelNames{"a"}, - Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Regex: MustNewRegexp("some-([^-]+)(-[^,]+)"), Action: Replace, Replacement: "${1}", - TargetLabel: "-${3}", + TargetLabel: "${3}", }, }, output: labels.FromMap(map[string]string{ - "a": "some-name-value", + "a": "some-name-0", }), }, { // more complex real-life like usecase @@ -565,6 +585,7 @@ func TestRelabel(t *testing.T) { if cfg.Replacement == "" { cfg.Replacement = DefaultRelabelConfig.Replacement } + require.NoError(t, cfg.Validate()) } res, keep := Process(test.input, test.relabel...) 
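With Validate now exported (UnmarshalYAML above just delegates to it), relabel configs built in code rather than loaded from YAML can run the same checks, as TestRelabel now does for its fixtures. A minimal sketch reusing an invalid target_label case from the validation table that follows:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	cfg := relabel.Config{
		SourceLabels: model.LabelNames{"a"},
		Regex:        relabel.MustNewRegexp("some-([^-]+)-([^,]+)"),
		Action:       relabel.Replace,
		Replacement:  "${1}",
		// Contains "$", so it is checked against the relabelTarget
		// pattern; the leading digit makes it invalid.
		TargetLabel: "0${3}",
	}
	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // "0${3}" is invalid 'target_label' for replace action
	}
}
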
@@ -575,6 +596,77 @@ func TestRelabel(t *testing.T) { } } +func TestRelabelValidate(t *testing.T) { + tests := []struct { + config Config + expected string + }{ + { + config: Config{}, + expected: `relabel action cannot be empty`, + }, + { + config: Config{ + Action: Replace, + }, + expected: `requires 'target_label' value`, + }, + { + config: Config{ + Action: Lowercase, + }, + expected: `requires 'target_label' value`, + }, + { + config: Config{ + Action: Lowercase, + Replacement: DefaultRelabelConfig.Replacement, + TargetLabel: "${3}", + }, + expected: `"${3}" is invalid 'target_label'`, + }, + { + config: Config{ + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, + Replacement: "${1}", + TargetLabel: "${3}", + }, + }, + { + config: Config{ + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, + Replacement: "${1}", + TargetLabel: "0${3}", + }, + expected: `"0${3}" is invalid 'target_label'`, + }, + { + config: Config{ + SourceLabels: model.LabelNames{"a"}, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, + Replacement: "${1}", + TargetLabel: "-${3}", + }, + expected: `"-${3}" is invalid 'target_label' for replace action`, + }, + } + for i, test := range tests { + t.Run(fmt.Sprint(i), func(t *testing.T) { + err := test.config.Validate() + if test.expected == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, test.expected) + } + }) + } +} + func TestTargetLabelValidity(t *testing.T) { tests := []struct { str string diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index d6499538e2..ef5008f4bf 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -193,10 +193,10 @@ groups: _, errs := Parse([]byte(group)) require.Len(t, errs, 2, "Expected two errors") var err00 *Error - require.True(t, errors.As(errs[0], &err00)) + require.ErrorAs(t, errs[0], &err00) err0 := err00.Err.node var err01 *Error - require.True(t, errors.As(errs[1], &err01)) + require.ErrorAs(t, errs[1], &err01) err1 := err01.Err.node require.NotEqual(t, err0, err1, "Error nodes should not be the same") } diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 2f5fdbc3bf..3a363ebfbc 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -16,7 +16,7 @@ package textparse import ( "mime" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -44,7 +44,7 @@ type Parser interface { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. - Type() ([]byte, MetricType) + Type() ([]byte, model.MetricType) // Unit returns the metric name and unit in the current entry. // Must only be called after Next returned a unit entry. @@ -66,10 +66,10 @@ type Parser interface { // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool - // CreatedTimestamp writes the created timestamp of the current sample - // into the passed timestamp. It returns false if no created timestamp - // exists or if the metric type does not support created timestamps. - CreatedTimestamp(ct *types.Timestamp) bool + // CreatedTimestamp returns the created timestamp (in milliseconds) for the + // current sample. 
It returns nil if it is unknown e.g. if it wasn't set, + // if the scrape protocol or metric type does not support created timestamps. + CreatedTimestamp() *int64 // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. @@ -111,17 +111,3 @@ const ( EntryUnit Entry = 4 EntryHistogram Entry = 5 // A series with a native histogram as a value. ) - -// MetricType represents metric type values. -type MetricType string - -const ( - MetricTypeCounter = MetricType("counter") - MetricTypeGauge = MetricType("gauge") - MetricTypeHistogram = MetricType("histogram") - MetricTypeGaugeHistogram = MetricType("gaugehistogram") - MetricTypeSummary = MetricType("summary") - MetricTypeInfo = MetricType("info") - MetricTypeStateset = MetricType("stateset") - MetricTypeUnknown = MetricType("unknown") -) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index bb50755441..ddfbe4fc5c 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -24,7 +24,7 @@ import ( "strings" "unicode/utf8" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -76,7 +76,7 @@ type OpenMetricsParser struct { builder labels.ScratchBuilder series []byte text []byte - mtype MetricType + mtype model.MetricType val float64 ts int64 hasTS bool @@ -128,7 +128,7 @@ func (p *OpenMetricsParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *OpenMetricsParser) Type() ([]byte, MetricType) { +func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) { return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype } @@ -213,9 +213,10 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns false because OpenMetricsParser does not support created timestamps (yet). -func (p *OpenMetricsParser) CreatedTimestamp(_ *types.Timestamp) bool { - return false +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *OpenMetricsParser) CreatedTimestamp() *int64 { + return nil } // nextToken returns the next token from the openMetricsLexer. 
@@ -273,21 +274,21 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tType: switch s := yoloString(p.text); s { case "counter": - p.mtype = MetricTypeCounter + p.mtype = model.MetricTypeCounter case "gauge": - p.mtype = MetricTypeGauge + p.mtype = model.MetricTypeGauge case "histogram": - p.mtype = MetricTypeHistogram + p.mtype = model.MetricTypeHistogram case "gaugehistogram": - p.mtype = MetricTypeGaugeHistogram + p.mtype = model.MetricTypeGaugeHistogram case "summary": - p.mtype = MetricTypeSummary + p.mtype = model.MetricTypeSummary case "info": - p.mtype = MetricTypeInfo + p.mtype = model.MetricTypeInfo case "stateset": - p.mtype = MetricTypeStateset + p.mtype = model.MetricTypeStateset case "unknown": - p.mtype = MetricTypeUnknown + p.mtype = model.MetricTypeUnknown default: return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index d65e4977ef..2b1d909f38 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -18,6 +18,7 @@ import ( "io" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" @@ -77,7 +78,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` m string t *int64 v float64 - typ MetricType + typ model.MetricType help string unit string comment string @@ -88,7 +89,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` help: "A summary of the GC invocation durations.", }, { m: "go_gc_duration_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "go_gc_duration_seconds", unit: "seconds", @@ -130,7 +131,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` help: "Number of goroutines that currently exist.", }, { m: "go_goroutines", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { m: `go_goroutines`, v: 33, @@ -138,21 +139,21 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("__name__", "go_goroutines"), }, { m: "hh", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: `hh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"), }, { m: "gh", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: `gh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"), }, { m: "hhh", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: `hhh_bucket{le="+Inf"}`, v: 1, @@ -165,7 +166,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, }, { m: "ggh", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: `ggh_bucket{le="+Inf"}`, v: 1, @@ -178,7 +179,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, }, { m: "smr_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: `smr_seconds_count`, v: 2, @@ -191,14 +192,14 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, }, { m: "ii", - typ: MetricTypeInfo, + typ: model.MetricTypeInfo, }, { m: `ii{foo="bar"}`, v: 1, lset: labels.FromStrings("__name__", "ii", "foo", "bar"), }, { m: "ss", - typ: 
MetricTypeStateset, + typ: model.MetricTypeStateset, }, { m: `ss{ss="foo"}`, v: 1, @@ -213,7 +214,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("A", "a", "__name__", "ss"), }, { m: "un", - typ: MetricTypeUnknown, + typ: model.MetricTypeUnknown, }, { m: "_metric_starting_with_underscore", v: 1, @@ -228,7 +229,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), }, { m: "foo", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { m: "foo_total", v: 17, @@ -269,9 +270,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` require.Equal(t, exp[i].v, v) require.Equal(t, exp[i].lset, res) if exp[i].e == nil { - require.Equal(t, false, found) + require.False(t, found) } else { - require.Equal(t, true, found) + require.True(t, found) require.Equal(t, *exp[i].e, e) } @@ -296,7 +297,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) } func TestOpenMetricsParseErrors(t *testing.T) { diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index b3fa2d8a6d..7123e52c33 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -26,7 +26,7 @@ import ( "unicode/utf8" "unsafe" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -148,7 +148,7 @@ type PromParser struct { builder labels.ScratchBuilder series []byte text []byte - mtype MetricType + mtype model.MetricType val float64 ts int64 hasTS bool @@ -192,7 +192,7 @@ func (p *PromParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *PromParser) Type() ([]byte, MetricType) { +func (p *PromParser) Type() ([]byte, model.MetricType) { return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype } @@ -247,9 +247,10 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } -// CreatedTimestamp returns false because PromParser does not support created timestamps. -func (p *PromParser) CreatedTimestamp(_ *types.Timestamp) bool { - return false +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *PromParser) CreatedTimestamp() *int64 { + return nil } // nextToken returns the next token from the promlexer. 
It skips over tabs @@ -306,15 +307,15 @@ func (p *PromParser) Next() (Entry, error) { case tType: switch s := yoloString(p.text); s { case "counter": - p.mtype = MetricTypeCounter + p.mtype = model.MetricTypeCounter case "gauge": - p.mtype = MetricTypeGauge + p.mtype = model.MetricTypeGauge case "histogram": - p.mtype = MetricTypeHistogram + p.mtype = model.MetricTypeHistogram case "summary": - p.mtype = MetricTypeSummary + p.mtype = model.MetricTypeSummary case "untyped": - p.mtype = MetricTypeUnknown + p.mtype = model.MetricTypeUnknown default: return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 280f39b4f1..d34b26ba5c 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -65,7 +65,7 @@ testmetric{label="\"bar\""} 1` m string t *int64 v float64 - typ MetricType + typ model.MetricType help string comment string }{ @@ -74,7 +74,7 @@ testmetric{label="\"bar\""} 1` help: "A summary of the GC invocation durations.", }, { m: "go_gc_duration_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: `go_gc_duration_seconds{quantile="0"}`, v: 4.9351e-05, @@ -142,7 +142,7 @@ testmetric{label="\"bar\""} 1` help: "Number of goroutines that currently exist.", }, { m: "go_goroutines", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { m: `go_goroutines`, v: 33, @@ -209,7 +209,7 @@ testmetric{label="\"bar\""} 1` i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) } func TestPromParseErrors(t *testing.T) { diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index 23afb5c596..534bbebb20 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -252,21 +252,21 @@ func (p *ProtobufParser) Help() ([]byte, []byte) { // Type returns the metric name and type in the current entry. // Must only be called after Next returned a type entry. // The returned byte slices become invalid after the next call to Next. -func (p *ProtobufParser) Type() ([]byte, MetricType) { +func (p *ProtobufParser) Type() ([]byte, model.MetricType) { n := p.metricBytes.Bytes() switch p.mf.GetType() { case dto.MetricType_COUNTER: - return n, MetricTypeCounter + return n, model.MetricTypeCounter case dto.MetricType_GAUGE: - return n, MetricTypeGauge + return n, model.MetricTypeGauge case dto.MetricType_HISTOGRAM: - return n, MetricTypeHistogram + return n, model.MetricTypeHistogram case dto.MetricType_GAUGE_HISTOGRAM: - return n, MetricTypeGaugeHistogram + return n, model.MetricTypeGaugeHistogram case dto.MetricType_SUMMARY: - return n, MetricTypeSummary + return n, model.MetricTypeSummary } - return n, MetricTypeUnknown + return n, model.MetricTypeUnknown } // Unit always returns (nil, nil) because units aren't supported by the protobuf @@ -360,22 +360,26 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return true } -func (p *ProtobufParser) CreatedTimestamp(ct *types.Timestamp) bool { - var foundCT *types.Timestamp +// CreatedTimestamp returns CT or nil if CT is not present or +// invalid (as timestamp e.g. negative value) on counters, summaries or histograms. 
+func (p *ProtobufParser) CreatedTimestamp() *int64 { + var ct *types.Timestamp switch p.mf.GetType() { case dto.MetricType_COUNTER: - foundCT = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() case dto.MetricType_SUMMARY: - foundCT = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - foundCT = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() default: } - if foundCT == nil { - return false + ctAsTime, err := types.TimestampFromProto(ct) + if err != nil { + // Errors means ct == nil or invalid timestamp, which we silently ignore. + return nil } - *ct = *foundCT - return true + ctMilis := ctAsTime.UnixMilli() + return &ctMilis } // Next advances the parser to the next "sample" (emulating the behavior of a diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index d83f2088a1..7dcc85f545 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" @@ -623,14 +623,14 @@ func TestProtobufParse(t *testing.T) { m string t int64 v float64 - typ MetricType + typ model.MetricType help string unit string comment string shs *histogram.Histogram fhs *histogram.FloatHistogram e []exemplar.Exemplar - ct *types.Timestamp + ct int64 } inputBuf := createTestProtoBuf(t) @@ -650,7 +650,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "go_build_info", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", @@ -668,7 +668,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "go_memstats_alloc_bytes_total", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { m: "go_memstats_alloc_bytes_total", @@ -686,7 +686,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "something_untyped", - typ: MetricTypeUnknown, + typ: model.MetricTypeUnknown, }, { m: "something_untyped", @@ -702,7 +702,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_histogram", @@ -737,7 +737,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: "test_gauge_histogram", @@ -773,7 +773,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_float_histogram", @@ -808,7 +808,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gauge_float_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: "test_gauge_float_histogram", @@ -844,7 +844,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram2", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_histogram2_count", @@ -904,7 +904,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_family", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: 
"test_histogram_family\xfffoo\xffbar", @@ -948,7 +948,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_float_histogram_with_zerothreshold_zero", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_float_histogram_with_zerothreshold_zero", @@ -972,7 +972,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "rpc_durations_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "rpc_durations_seconds_count\xffservice\xffexponential", @@ -1023,7 +1023,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "without_quantiles", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "without_quantiles_count", @@ -1045,7 +1045,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "empty_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "empty_histogram", @@ -1064,12 +1064,12 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_counter_with_createdtimestamp", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { m: "test_counter_with_createdtimestamp", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ -1080,12 +1080,12 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_summary_with_createdtimestamp", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1093,7 +1093,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1104,11 +1104,11 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_createdtimestamp", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { m: "test_histogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1124,11 +1124,11 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gaugehistogram_with_createdtimestamp", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { m: "test_gaugehistogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1150,7 +1150,7 @@ func TestProtobufParse(t *testing.T) { }, { // 1 m: "go_build_info", - typ: MetricTypeGauge, + typ: model.MetricTypeGauge, }, { // 2 m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", @@ -1168,7 +1168,7 @@ func TestProtobufParse(t *testing.T) { }, { // 4 m: "go_memstats_alloc_bytes_total", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { // 5 m: "go_memstats_alloc_bytes_total", @@ -1186,7 +1186,7 @@ func TestProtobufParse(t *testing.T) { }, { // 7 m: "something_untyped", - typ: MetricTypeUnknown, + typ: model.MetricTypeUnknown, }, { // 8 m: "something_untyped", @@ -1202,7 +1202,7 @@ func TestProtobufParse(t *testing.T) { }, { // 10 m: "test_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 11 m: "test_histogram", @@ -1295,7 +1295,7 @@ func TestProtobufParse(t *testing.T) { }, { // 19 m: 
"test_gauge_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { // 20 m: "test_gauge_histogram", @@ -1389,7 +1389,7 @@ func TestProtobufParse(t *testing.T) { }, { // 28 m: "test_float_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 29 m: "test_float_histogram", @@ -1482,7 +1482,7 @@ func TestProtobufParse(t *testing.T) { }, { // 37 m: "test_gauge_float_histogram", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { // 38 m: "test_gauge_float_histogram", @@ -1576,7 +1576,7 @@ func TestProtobufParse(t *testing.T) { }, { // 46 m: "test_histogram2", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 47 m: "test_histogram2_count", @@ -1636,7 +1636,7 @@ func TestProtobufParse(t *testing.T) { }, { // 54 m: "test_histogram_family", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 55 m: "test_histogram_family\xfffoo\xffbar", @@ -1766,7 +1766,7 @@ func TestProtobufParse(t *testing.T) { }, { // 68 m: "test_float_histogram_with_zerothreshold_zero", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 69 m: "test_float_histogram_with_zerothreshold_zero", @@ -1790,7 +1790,7 @@ func TestProtobufParse(t *testing.T) { }, { // 71 m: "rpc_durations_seconds", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { // 72 m: "rpc_durations_seconds_count\xffservice\xffexponential", @@ -1841,7 +1841,7 @@ func TestProtobufParse(t *testing.T) { }, { // 78 m: "without_quantiles", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { // 79 m: "without_quantiles_count", @@ -1863,7 +1863,7 @@ func TestProtobufParse(t *testing.T) { }, { // 79 m: "empty_histogram", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 80 m: "empty_histogram", @@ -1882,12 +1882,12 @@ func TestProtobufParse(t *testing.T) { }, { // 82 m: "test_counter_with_createdtimestamp", - typ: MetricTypeCounter, + typ: model.MetricTypeCounter, }, { // 83 m: "test_counter_with_createdtimestamp", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ -1898,12 +1898,12 @@ func TestProtobufParse(t *testing.T) { }, { // 85 m: "test_summary_with_createdtimestamp", - typ: MetricTypeSummary, + typ: model.MetricTypeSummary, }, { // 86 m: "test_summary_with_createdtimestamp_count", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1911,7 +1911,7 @@ func TestProtobufParse(t *testing.T) { { // 87 m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1922,11 +1922,11 @@ func TestProtobufParse(t *testing.T) { }, { // 89 m: "test_histogram_with_createdtimestamp", - typ: MetricTypeHistogram, + typ: model.MetricTypeHistogram, }, { // 90 m: "test_histogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1942,11 +1942,11 @@ func TestProtobufParse(t *testing.T) { }, { // 92 m: "test_gaugehistogram_with_createdtimestamp", - typ: MetricTypeGaugeHistogram, + typ: model.MetricTypeGaugeHistogram, }, { // 93 m: "test_gaugehistogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, 
shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1981,30 +1981,29 @@ func TestProtobufParse(t *testing.T) { m, ts, v := p.Series() var e exemplar.Exemplar - var ct types.Timestamp p.Metric(&res) eFound := p.Exemplar(&e) - ctFound := p.CreatedTimestamp(&ct) + ct := p.CreatedTimestamp() require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0), "i: %d", i) + require.Equal(t, int64(0), exp[i].t, "i: %d", i) } require.Equal(t, exp[i].v, v, "i: %d", i) require.Equal(t, exp[i].lset, res, "i: %d", i) if len(exp[i].e) == 0 { - require.Equal(t, false, eFound, "i: %d", i) + require.False(t, eFound, "i: %d", i) } else { - require.Equal(t, true, eFound, "i: %d", i) + require.True(t, eFound, "i: %d", i) require.Equal(t, exp[i].e[0], e, "i: %d", i) require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) } - if exp[i].ct != nil { - require.Equal(t, true, ctFound, "i: %d", i) - require.Equal(t, exp[i].ct.String(), ct.String(), "i: %d", i) + if exp[i].ct != 0 { + require.NotNilf(t, ct, "i: %d", i) + require.Equal(t, exp[i].ct, *ct, "i: %d", i) } else { - require.Equal(t, false, ctFound, "i: %d", i) + require.Nilf(t, ct, "i: %d", i) } case EntryHistogram: @@ -2014,7 +2013,7 @@ func TestProtobufParse(t *testing.T) { if ts != nil { require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0), "i: %d", i) + require.Equal(t, int64(0), exp[i].t, "i: %d", i) } require.Equal(t, exp[i].lset, res, "i: %d", i) require.Equal(t, exp[i].m, string(m), "i: %d", i) @@ -2028,7 +2027,7 @@ func TestProtobufParse(t *testing.T) { require.Equal(t, exp[i].e[j], e, "i: %d", i) e = exemplar.Exemplar{} } - require.Equal(t, len(exp[i].e), j, "not enough exemplars found, i: %d", i) + require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) case EntryType: m, typ := p.Type() @@ -2051,7 +2050,7 @@ func TestProtobufParse(t *testing.T) { i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) }) } } diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 66ee45c6e8..837ec66bd0 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -83,7 +83,7 @@ func TestHandlerNextBatch(t *testing.T) { require.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch())) require.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch())) require.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch())) - require.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue)) + require.Empty(t, h.queue, "Expected queue to be empty but got %d alerts", len(h.queue)) } func alertsEqual(a, b []*Alert) error { @@ -482,7 +482,7 @@ alerting: ` err := yaml.UnmarshalStrict([]byte(s), cfg) require.NoError(t, err, "Unable to load YAML config.") - require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) err = n.ApplyConfig(cfg) require.NoError(t, err, "Error applying the config.") @@ -533,7 +533,7 @@ alerting: ` err := yaml.UnmarshalStrict([]byte(s), cfg) require.NoError(t, err, "Unable to load YAML config.") - require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) err = n.ApplyConfig(cfg) require.NoError(t, err, "Error applying the config.") diff --git a/prompb/io/prometheus/client/metrics.pb.go 
b/prompb/io/prometheus/client/metrics.pb.go index 5538395117..702ee62fce 100644 --- a/prompb/io/prometheus/client/metrics.pb.go +++ b/prompb/io/prometheus/client/metrics.pb.go @@ -411,7 +411,7 @@ type Histogram struct { SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` - // Buckets for the conventional histogram. + // Buckets for the classic histogram. Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. @@ -878,6 +878,7 @@ type MetricFamily struct { Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -944,6 +945,13 @@ func (m *MetricFamily) GetMetric() []Metric { return nil } +func (m *MetricFamily) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + func init() { proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") @@ -965,67 +973,68 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 960 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, - 0x14, 0xee, 0xd6, 0xbf, 0x7b, 0x1c, 0x27, 0x9b, 0xc1, 0xaa, 0x56, 0x81, 0xc4, 0x66, 0x25, 0xa4, - 0x80, 0x90, 0x2d, 0xa0, 0x08, 0x54, 0x8a, 0x44, 0xd2, 0xa6, 0x2e, 0x2a, 0x6e, 0xcb, 0xd8, 0xbe, - 0x28, 0x37, 0xab, 0xb1, 0x3d, 0x59, 0xaf, 0xd8, 0xdd, 0x59, 0xf6, 0xa7, 0x22, 0xdc, 0xf3, 0x0c, - 0xbc, 0x00, 0x17, 0x3c, 0x05, 0x97, 0xa8, 0x97, 0x5c, 0x71, 0x89, 0x50, 0x9e, 0x04, 0xcd, 0xdf, - 0xae, 0x53, 0xad, 0x03, 0x81, 0xbb, 0x99, 0xcf, 0xdf, 0x39, 0xf3, 0x9d, 0x6f, 0xc6, 0xe7, 0x2c, - 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x69, 0xb6, 0xa6, 0x79, 0x3a, 0x5a, 0x06, 0x3e, 0x8d, 0xb2, - 0x51, 0x48, 0xb3, 0xc4, 0x5f, 0xa6, 0xc3, 0x38, 0x61, 0x19, 0x43, 0x3d, 0x9f, 0x0d, 0x4b, 0xce, - 0x50, 0x72, 0x0e, 0x7a, 0x1e, 0xf3, 0x98, 0x20, 0x8c, 0xf8, 0x4a, 0x72, 0x0f, 0xfa, 0x1e, 0x63, - 0x5e, 0x40, 0x47, 0x62, 0xb7, 0xc8, 0xcf, 0x47, 0x99, 0x1f, 0xd2, 0x34, 0x23, 0x61, 0x2c, 0x09, - 0xce, 0xc7, 0x60, 0x7e, 0x45, 0x16, 0x34, 0x78, 0x4e, 0xfc, 0x04, 0x21, 0xa8, 0x47, 0x24, 0xa4, - 0xb6, 0x31, 0x30, 0x8e, 0x4d, 0x2c, 0xd6, 0xa8, 0x07, 0x8d, 0x97, 0x24, 0xc8, 0xa9, 0x7d, 0x5b, - 0x80, 0x72, 0xe3, 0x1c, 0x42, 0x63, 0x4c, 0x72, 0x6f, 0xe3, 0x67, 0x1e, 0x63, 0xe8, 0x9f, 0x7f, - 0x36, 0xa0, 0xf5, 0x80, 0xe5, 0x51, 0x46, 0x93, 0x6a, 0x06, 0xba, 0x07, 0x6d, 0xfa, 0x3d, 0x0d, - 0xe3, 0x80, 0x24, 0x22, 0x73, 0xe7, 0xc3, 0xa3, 0x61, 0x55, 0x5d, 0xc3, 0x33, 0xc5, 0xc2, 0x05, - 0x1f, 0x8d, 0x61, 0x7f, 0x99, 0x50, 0x92, 0xd1, 0x95, 0x5b, 0x94, 0x63, 0xd7, 0x44, 0x92, 0x83, - 0xa1, 0x2c, 0x78, 0xa8, 
0x0b, 0x1e, 0xce, 0x34, 0x03, 0x5b, 0x2a, 0xa8, 0x40, 0x9c, 0xfb, 0xd0, - 0xfe, 0x3a, 0x27, 0x51, 0xe6, 0x07, 0x14, 0x1d, 0x40, 0xfb, 0x3b, 0xb5, 0x56, 0x4a, 0x8b, 0xfd, - 0x55, 0x0f, 0x8a, 0x22, 0xff, 0x30, 0xa0, 0x35, 0xcd, 0xc3, 0x90, 0x24, 0x17, 0xe8, 0x6d, 0xd8, - 0x49, 0x49, 0x18, 0x07, 0xd4, 0x5d, 0xf2, 0xb2, 0x45, 0x86, 0x3a, 0xee, 0x48, 0x4c, 0x38, 0x81, - 0x0e, 0x01, 0x14, 0x25, 0xcd, 0x43, 0x95, 0xc9, 0x94, 0xc8, 0x34, 0x0f, 0xd1, 0x17, 0x1b, 0xe7, - 0xd7, 0x06, 0xb5, 0xed, 0x86, 0x68, 0xc5, 0xa7, 0xf5, 0x57, 0x7f, 0xf6, 0x6f, 0x6d, 0xa8, 0xac, - 0xb4, 0xa5, 0xfe, 0x1f, 0x6c, 0xe9, 0x43, 0x6b, 0x1e, 0x65, 0x17, 0x31, 0x5d, 0x6d, 0xb9, 0xde, - 0x5f, 0x1b, 0x60, 0x3e, 0xf6, 0xd3, 0x8c, 0x79, 0x09, 0x09, 0xff, 0x4d, 0xed, 0xef, 0x03, 0xda, - 0xa4, 0xb8, 0xe7, 0x01, 0x23, 0x99, 0xd0, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x88, 0xe3, 0xff, 0xe4, - 0xd4, 0x3d, 0x68, 0x2e, 0xf2, 0xe5, 0xb7, 0x34, 0x53, 0x3e, 0xbd, 0x55, 0xed, 0xd3, 0xa9, 0xe0, - 0x28, 0x97, 0x54, 0x44, 0xb5, 0x47, 0x7b, 0x37, 0xf7, 0x08, 0xdd, 0x81, 0x66, 0xba, 0x5c, 0xd3, - 0x90, 0xd8, 0x8d, 0x81, 0x71, 0xbc, 0x8f, 0xd5, 0x0e, 0xbd, 0x03, 0xbb, 0x3f, 0xd0, 0x84, 0xb9, - 0xd9, 0x3a, 0xa1, 0xe9, 0x9a, 0x05, 0x2b, 0xbb, 0x29, 0xf4, 0x77, 0x39, 0x3a, 0xd3, 0x20, 0x2f, - 0x51, 0xd0, 0xa4, 0x63, 0x2d, 0xe1, 0x98, 0xc9, 0x11, 0xe9, 0xd7, 0x31, 0x58, 0xe5, 0xcf, 0xca, - 0xad, 0xb6, 0xc8, 0xb3, 0x5b, 0x90, 0xa4, 0x57, 0x4f, 0xa0, 0x1b, 0x51, 0x8f, 0x64, 0xfe, 0x4b, - 0xea, 0xa6, 0x31, 0x89, 0x6c, 0x53, 0x78, 0x32, 0xb8, 0xce, 0x93, 0x69, 0x4c, 0x22, 0xe5, 0xcb, - 0x8e, 0x0e, 0xe6, 0x18, 0x17, 0x5f, 0x24, 0x5b, 0xd1, 0x20, 0x23, 0x36, 0x0c, 0x6a, 0xc7, 0x08, - 0x17, 0x47, 0x3c, 0xe4, 0xe0, 0x15, 0x9a, 0x2c, 0xa0, 0x33, 0xa8, 0xf1, 0x1a, 0x35, 0x2a, 0x8b, - 0x78, 0x02, 0xdd, 0x98, 0xa5, 0x7e, 0x29, 0x6d, 0xe7, 0x66, 0xd2, 0x74, 0xb0, 0x96, 0x56, 0x24, - 0x93, 0xd2, 0xba, 0x52, 0x9a, 0x46, 0x0b, 0x69, 0x05, 0x4d, 0x4a, 0xdb, 0x95, 0xd2, 0x34, 0x2a, - 0xa4, 0x39, 0xbf, 0x19, 0xd0, 0x94, 0x07, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x96, - 0x23, 0x5f, 0xf0, 0x5e, 0x89, 0xcb, 0x82, 0xee, 0xc2, 0x9d, 0xd7, 0xa9, 0x57, 0x5e, 0x72, 0xef, - 0xb5, 0x00, 0x79, 0x43, 0x7d, 0xe8, 0xe4, 0x71, 0x4c, 0x13, 0x77, 0xc1, 0xf2, 0x68, 0xa5, 0x9e, - 0x33, 0x08, 0xe8, 0x94, 0x23, 0x57, 0x5a, 0x61, 0xed, 0x66, 0xad, 0xd0, 0xb9, 0x0f, 0x50, 0x1a, - 0xc7, 0x1f, 0x25, 0x3b, 0x3f, 0x4f, 0xa9, 0xac, 0x60, 0x1f, 0xab, 0x1d, 0xc7, 0x03, 0x1a, 0x79, - 0xd9, 0x5a, 0x9c, 0xde, 0xc5, 0x6a, 0xe7, 0xfc, 0x64, 0x40, 0x5b, 0x27, 0x45, 0x9f, 0x41, 0x23, - 0xe0, 0x93, 0xc0, 0x36, 0xc4, 0x35, 0xf5, 0xab, 0x35, 0x14, 0xc3, 0x42, 0xdd, 0x92, 0x8c, 0xa9, - 0xee, 0x90, 0xe8, 0x53, 0x30, 0x6f, 0xd2, 0xa0, 0x4b, 0xb2, 0xf3, 0x63, 0x0d, 0x9a, 0x13, 0x31, - 0xf5, 0xfe, 0x9f, 0xae, 0x0f, 0xa0, 0xe1, 0xf1, 0x39, 0xa5, 0x66, 0xcc, 0x9b, 0xd5, 0xc1, 0x62, - 0x94, 0x61, 0xc9, 0x44, 0x9f, 0x40, 0x6b, 0x29, 0x47, 0x97, 0x92, 0x7c, 0x58, 0x1d, 0xa4, 0xe6, - 0x1b, 0xd6, 0x6c, 0x1e, 0x98, 0xca, 0x71, 0xa0, 0xba, 0xee, 0x96, 0x40, 0x35, 0x33, 0xb0, 0x66, - 0xf3, 0xc0, 0x5c, 0xf6, 0x5b, 0xd1, 0x4c, 0xb6, 0x06, 0xaa, 0xa6, 0x8c, 0x35, 0x1b, 0x7d, 0x0e, - 0xe6, 0x5a, 0xb7, 0x61, 0xd1, 0x44, 0xb6, 0xda, 0x53, 0x74, 0x6b, 0x5c, 0x46, 0xf0, 0xc6, 0x5d, - 0x38, 0xee, 0x86, 0xa9, 0xe8, 0x54, 0x35, 0xdc, 0x29, 0xb0, 0x49, 0xea, 0xfc, 0x62, 0xc0, 0x8e, - 0xbc, 0x87, 0x47, 0x24, 0xf4, 0x83, 0x8b, 0xca, 0x4f, 0x04, 0x04, 0xf5, 0x35, 0x0d, 0x62, 0xf5, - 0x85, 0x20, 0xd6, 0xe8, 0x2e, 0xd4, 0xb9, 0x46, 0x61, 0xe1, 0xee, 0xb6, 0xff, 0xbc, 0xcc, 0x3c, - 0xbb, 0x88, 0x29, 0x16, 0x6c, 0xde, 0xda, 0xe5, 
0xb7, 0x8e, 0x5d, 0xbf, 0xae, 0xb5, 0xcb, 0x38, - 0xdd, 0xda, 0x65, 0xc4, 0x7b, 0x0b, 0x80, 0x32, 0x1f, 0xea, 0x40, 0xeb, 0xc1, 0xb3, 0xf9, 0xd3, - 0xd9, 0x19, 0xb6, 0x6e, 0x21, 0x13, 0x1a, 0xe3, 0x93, 0xf9, 0xf8, 0xcc, 0x32, 0x38, 0x3e, 0x9d, - 0x4f, 0x26, 0x27, 0xf8, 0x85, 0x75, 0x9b, 0x6f, 0xe6, 0x4f, 0x67, 0x2f, 0x9e, 0x9f, 0x3d, 0xb4, - 0x6a, 0xa8, 0x0b, 0xe6, 0xe3, 0x2f, 0xa7, 0xb3, 0x67, 0x63, 0x7c, 0x32, 0xb1, 0xea, 0xe8, 0x0d, - 0xd8, 0x13, 0x31, 0x6e, 0x09, 0x36, 0x4e, 0x9d, 0x57, 0x97, 0x47, 0xc6, 0xef, 0x97, 0x47, 0xc6, - 0x5f, 0x97, 0x47, 0xc6, 0x37, 0x3d, 0x9f, 0xb9, 0xa5, 0x38, 0x57, 0x8a, 0x5b, 0x34, 0xc5, 0xcb, - 0xfe, 0xe8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x2e, 0x66, 0xc1, 0xcb, 0x09, 0x00, 0x00, + // 969 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0xae, 0x9b, 0x5f, 0x9f, 0x6c, 0x76, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, + 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0x52, 0x24, 0x76, 0xdb, 0x6d, 0x8a, 0x4a, 0xda, 0x32, 0x49, + 0x2e, 0xca, 0x8d, 0x35, 0x49, 0x66, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, 0x33, + 0xf0, 0x02, 0x3c, 0x06, 0xe2, 0x12, 0xf5, 0x92, 0x2b, 0x2e, 0x11, 0xda, 0x27, 0x41, 0xf3, 0x63, + 0x3b, 0x5b, 0x39, 0x0b, 0x0b, 0x77, 0x33, 0x5f, 0xbe, 0x73, 0xe6, 0x3b, 0xdf, 0x4c, 0xce, 0x31, + 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x29, 0x5f, 0xd3, 0x2c, 0x1d, 0x2d, 0x03, 0x9f, 0x46, 0x7c, + 0x14, 0x52, 0x9e, 0xf8, 0xcb, 0x74, 0x18, 0x27, 0x8c, 0x33, 0xd4, 0xf3, 0xd9, 0xb0, 0xe4, 0x0c, + 0x15, 0xe7, 0xa0, 0xe7, 0x31, 0x8f, 0x49, 0xc2, 0x48, 0xac, 0x14, 0xf7, 0xa0, 0xef, 0x31, 0xe6, + 0x05, 0x74, 0x24, 0x77, 0x8b, 0xec, 0x7c, 0xc4, 0xfd, 0x90, 0xa6, 0x9c, 0x84, 0xb1, 0x22, 0x38, + 0x1f, 0x83, 0xf9, 0x15, 0x59, 0xd0, 0xe0, 0x39, 0xf1, 0x13, 0x84, 0xa0, 0x1e, 0x91, 0x90, 0xda, + 0xc6, 0xc0, 0x38, 0x36, 0xb1, 0x5c, 0xa3, 0x1e, 0x34, 0x5e, 0x92, 0x20, 0xa3, 0xf6, 0x6d, 0x09, + 0xaa, 0x8d, 0x73, 0x08, 0x8d, 0x31, 0xc9, 0xbc, 0x8d, 0x9f, 0x45, 0x8c, 0x91, 0xff, 0xfc, 0xb3, + 0x01, 0xad, 0x07, 0x2c, 0x8b, 0x38, 0x4d, 0xaa, 0x19, 0xe8, 0x1e, 0xb4, 0xe9, 0xf7, 0x34, 0x8c, + 0x03, 0x92, 0xc8, 0xcc, 0x9d, 0x0f, 0x8f, 0x86, 0x55, 0x75, 0x0d, 0xcf, 0x34, 0x0b, 0x17, 0x7c, + 0x34, 0x86, 0xfd, 0x65, 0x42, 0x09, 0xa7, 0x2b, 0xb7, 0x28, 0xc7, 0xae, 0xc9, 0x24, 0x07, 0x43, + 0x55, 0xf0, 0x30, 0x2f, 0x78, 0x38, 0xcb, 0x19, 0xd8, 0xd2, 0x41, 0x05, 0xe2, 0xdc, 0x87, 0xf6, + 0xd7, 0x19, 0x89, 0xb8, 0x1f, 0x50, 0x74, 0x00, 0xed, 0xef, 0xf4, 0x5a, 0x2b, 0x2d, 0xf6, 0x57, + 0x3d, 0x28, 0x8a, 0xfc, 0xc3, 0x80, 0xd6, 0x34, 0x0b, 0x43, 0x92, 0x5c, 0xa0, 0xb7, 0x61, 0x27, + 0x25, 0x61, 0x1c, 0x50, 0x77, 0x29, 0xca, 0x96, 0x19, 0xea, 0xb8, 0xa3, 0x30, 0xe9, 0x04, 0x3a, + 0x04, 0xd0, 0x94, 0x34, 0x0b, 0x75, 0x26, 0x53, 0x21, 0xd3, 0x2c, 0x44, 0x5f, 0x6c, 0x9c, 0x5f, + 0x1b, 0xd4, 0xb6, 0x1b, 0x92, 0x2b, 0x3e, 0xad, 0xbf, 0xfa, 0xb3, 0x7f, 0x6b, 0x43, 0x65, 0xa5, + 0x2d, 0xf5, 0xff, 0x60, 0x4b, 0x1f, 0x5a, 0xf3, 0x88, 0x5f, 0xc4, 0x74, 0xb5, 0xe5, 0x7a, 0x7f, + 0x6d, 0x80, 0xf9, 0xd8, 0x4f, 0x39, 0xf3, 0x12, 0x12, 0xfe, 0x9b, 0xda, 0xdf, 0x07, 0xb4, 0x49, + 0x71, 0xcf, 0x03, 0x46, 0xb8, 0xd4, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x48, 0xe0, 0xff, 0xe4, 0xd4, + 0x3d, 0x68, 0x2e, 0xb2, 0xe5, 0xb7, 0x94, 0x6b, 0x9f, 0xde, 0xaa, 0xf6, 0xe9, 0x54, 0x72, 0xb4, + 0x4b, 0x3a, 0xa2, 0xda, 0xa3, 0xbd, 0x9b, 0x7b, 0x84, 0xee, 0x40, 0x33, 0x5d, 0xae, 0x69, 0x48, + 0xec, 0xc6, 0xc0, 0x38, 0xde, 0xc7, 0x7a, 0x87, 0xde, 0x81, 0xdd, 0x1f, 0x68, 0xc2, 0x5c, 0xbe, + 0x4e, 0x68, 0xba, 0x66, 
0xc1, 0xca, 0x6e, 0x4a, 0xfd, 0x5d, 0x81, 0xce, 0x72, 0x50, 0x94, 0x28, + 0x69, 0xca, 0xb1, 0x96, 0x74, 0xcc, 0x14, 0x88, 0xf2, 0xeb, 0x18, 0xac, 0xf2, 0x67, 0xed, 0x56, + 0x5b, 0xe6, 0xd9, 0x2d, 0x48, 0xca, 0xab, 0x27, 0xd0, 0x8d, 0xa8, 0x47, 0xb8, 0xff, 0x92, 0xba, + 0x69, 0x4c, 0x22, 0xdb, 0x94, 0x9e, 0x0c, 0xae, 0xf3, 0x64, 0x1a, 0x93, 0x48, 0xfb, 0xb2, 0x93, + 0x07, 0x0b, 0x4c, 0x88, 0x2f, 0x92, 0xad, 0x68, 0xc0, 0x89, 0x0d, 0x83, 0xda, 0x31, 0xc2, 0xc5, + 0x11, 0x0f, 0x05, 0x78, 0x85, 0xa6, 0x0a, 0xe8, 0x0c, 0x6a, 0xa2, 0xc6, 0x1c, 0x55, 0x45, 0x3c, + 0x81, 0x6e, 0xcc, 0x52, 0xbf, 0x94, 0xb6, 0x73, 0x33, 0x69, 0x79, 0x70, 0x2e, 0xad, 0x48, 0xa6, + 0xa4, 0x75, 0x95, 0xb4, 0x1c, 0x2d, 0xa4, 0x15, 0x34, 0x25, 0x6d, 0x57, 0x49, 0xcb, 0x51, 0x29, + 0xcd, 0xf9, 0xcd, 0x80, 0xa6, 0x3a, 0x10, 0xbd, 0x0b, 0xd6, 0x32, 0x0b, 0xb3, 0x60, 0xb3, 0x1c, + 0xf5, 0x82, 0xf7, 0x4a, 0x5c, 0x15, 0x74, 0x17, 0xee, 0xbc, 0x4e, 0xbd, 0xf2, 0x92, 0x7b, 0xaf, + 0x05, 0xa8, 0x1b, 0xea, 0x43, 0x27, 0x8b, 0x63, 0x9a, 0xb8, 0x0b, 0x96, 0x45, 0x2b, 0xfd, 0x9c, + 0x41, 0x42, 0xa7, 0x02, 0xb9, 0xd2, 0x0a, 0x6b, 0x37, 0x6b, 0x85, 0xce, 0x7d, 0x80, 0xd2, 0x38, + 0xf1, 0x28, 0xd9, 0xf9, 0x79, 0x4a, 0x55, 0x05, 0xfb, 0x58, 0xef, 0x04, 0x1e, 0xd0, 0xc8, 0xe3, + 0x6b, 0x79, 0x7a, 0x17, 0xeb, 0x9d, 0xf3, 0x93, 0x01, 0xed, 0x3c, 0x29, 0xfa, 0x0c, 0x1a, 0x81, + 0x98, 0x04, 0xb6, 0x21, 0xaf, 0xa9, 0x5f, 0xad, 0xa1, 0x18, 0x16, 0xfa, 0x96, 0x54, 0x4c, 0x75, + 0x87, 0x44, 0x9f, 0x82, 0x79, 0x93, 0x06, 0x5d, 0x92, 0x9d, 0x1f, 0x6b, 0xd0, 0x9c, 0xc8, 0xa9, + 0xf7, 0xff, 0x74, 0x7d, 0x00, 0x0d, 0x4f, 0xcc, 0x29, 0x3d, 0x63, 0xde, 0xac, 0x0e, 0x96, 0xa3, + 0x0c, 0x2b, 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x35, 0xba, 0xb4, 0xe4, 0xc3, 0xea, 0x20, 0x3d, 0xdf, + 0x70, 0xce, 0x16, 0x81, 0xa9, 0x1a, 0x07, 0xba, 0xeb, 0x6e, 0x09, 0xd4, 0x33, 0x03, 0xe7, 0x6c, + 0x11, 0x98, 0xa9, 0x7e, 0x2b, 0x9b, 0xc9, 0xd6, 0x40, 0xdd, 0x94, 0x71, 0xce, 0x46, 0x9f, 0x83, + 0xb9, 0xce, 0xdb, 0xb0, 0x6c, 0x22, 0x5b, 0xed, 0x29, 0xba, 0x35, 0x2e, 0x23, 0x44, 0xe3, 0x2e, + 0x1c, 0x77, 0xc3, 0x54, 0x76, 0xaa, 0x1a, 0xee, 0x14, 0xd8, 0x24, 0x75, 0x7e, 0x31, 0x60, 0x47, + 0xdd, 0xc3, 0x23, 0x12, 0xfa, 0xc1, 0x45, 0xe5, 0x27, 0x02, 0x82, 0xfa, 0x9a, 0x06, 0xb1, 0xfe, + 0x42, 0x90, 0x6b, 0x74, 0x17, 0xea, 0x42, 0xa3, 0xb4, 0x70, 0x77, 0xdb, 0x7f, 0x5e, 0x65, 0x9e, + 0x5d, 0xc4, 0x14, 0x4b, 0xb6, 0x68, 0xed, 0xea, 0x5b, 0xc7, 0xae, 0x5f, 0xd7, 0xda, 0x55, 0x5c, + 0xde, 0xda, 0x55, 0x84, 0x50, 0x91, 0x45, 0x3e, 0x97, 0x16, 0x9a, 0x58, 0xae, 0xdf, 0x5b, 0x00, + 0x94, 0x67, 0xa0, 0x0e, 0xb4, 0x1e, 0x3c, 0x9b, 0x3f, 0x9d, 0x9d, 0x61, 0xeb, 0x16, 0x32, 0xa1, + 0x31, 0x3e, 0x99, 0x8f, 0xcf, 0x2c, 0x43, 0xe0, 0xd3, 0xf9, 0x64, 0x72, 0x82, 0x5f, 0x58, 0xb7, + 0xc5, 0x66, 0xfe, 0x74, 0xf6, 0xe2, 0xf9, 0xd9, 0x43, 0xab, 0x86, 0xba, 0x60, 0x3e, 0xfe, 0x72, + 0x3a, 0x7b, 0x36, 0xc6, 0x27, 0x13, 0xab, 0x8e, 0xde, 0x80, 0x3d, 0x19, 0xe3, 0x96, 0x60, 0xe3, + 0xd4, 0x79, 0x75, 0x79, 0x64, 0xfc, 0x7e, 0x79, 0x64, 0xfc, 0x75, 0x79, 0x64, 0x7c, 0xd3, 0xf3, + 0x99, 0x5b, 0x0a, 0x76, 0x95, 0xe0, 0x45, 0x53, 0xbe, 0xf6, 0x8f, 0xfe, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x6d, 0x53, 0xc5, 0x1e, 0xdf, 0x09, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1755,6 +1764,13 @@ func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } if len(m.Metric) > 
0 { for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { { @@ -2128,6 +2144,10 @@ func (m *MetricFamily) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -4074,6 +4094,38 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) diff --git a/prompb/io/prometheus/client/metrics.proto b/prompb/io/prometheus/client/metrics.proto index be4d2dbae1..06dcc34afb 100644 --- a/prompb/io/prometheus/client/metrics.proto +++ b/prompb/io/prometheus/client/metrics.proto @@ -76,7 +76,7 @@ message Histogram { uint64 sample_count = 1; double sample_count_float = 4; // Overrides sample_count if > 0. double sample_sum = 2; - // Buckets for the conventional histogram. + // Buckets for the classic histogram. repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. google.protobuf.Timestamp created_timestamp = 15; @@ -153,4 +153,5 @@ message MetricFamily { string help = 2; MetricType type = 3; repeated Metric metric = 4 [(gogoproto.nullable) = false]; + string unit = 5; } diff --git a/prompb/types.pb.go b/prompb/types.pb.go index 125f868e97..93883daa13 100644 --- a/prompb/types.pb.go +++ b/prompb/types.pb.go @@ -164,7 +164,7 @@ func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { type MetricMetadata struct { // Represents the metric type, these match the set from Prometheus. - // Refer to model/textparse/interface.go for details. + // Refer to github.com/prometheus/common/model/metadata.go for details. Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` diff --git a/prompb/types.proto b/prompb/types.proto index aa322515c3..61fc1e0143 100644 --- a/prompb/types.proto +++ b/prompb/types.proto @@ -31,7 +31,7 @@ message MetricMetadata { } // Represents the metric type, these match the set from Prometheus. - // Refer to model/textparse/interface.go for details. + // Refer to github.com/prometheus/common/model/metadata.go for details. 
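The hand-rolled marshal/unmarshal code above hard-codes the wire tag 0x2a for the new Unit field and dispatches on case 5 with wireType 2 when decoding. Both encode the same fact about the protobuf wire format: tag = (field number << 3) | wire type, with Unit as field 5 and wire type 2 (length-delimited). A quick check, plain Go and illustrative only:

package main

import "fmt"

func main() {
	const fieldNumber = 5 // MetricFamily.unit
	const wireTypeLEN = 2 // length-delimited (strings, bytes, sub-messages)
	fmt.Printf("0x%x\n", fieldNumber<<3|wireTypeLEN) // prints 0x2a, matching dAtA[i] = 0x2a above
}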
MetricType type = 1; string metric_family_name = 2; string help = 4; diff --git a/promql/bench_test.go b/promql/bench_test.go index 13eba3714e..b7a4978de2 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -296,8 +296,12 @@ func BenchmarkNativeHistograms(b *testing.B) { query: "sum(native_histogram_series)", }, { - name: "sum rate", - query: "sum(rate(native_histogram_series[1m]))", + name: "sum rate with short rate interval", + query: "sum(rate(native_histogram_series[2m]))", + }, + { + name: "sum rate with long rate interval", + query: "sum(rate(native_histogram_series[20m]))", }, } diff --git a/promql/engine.go b/promql/engine.go index 5b6b79d4da..cca43a51e8 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1075,7 +1075,7 @@ type EvalNodeHelper struct { // Caches. // DropMetricName and label_*. Dmn map[uint64]labels.Labels - // funcHistogramQuantile for conventional histograms. + // funcHistogramQuantile for classic histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets // label_replace. regex *regexp.Regexp @@ -2053,7 +2053,12 @@ func (ev *evaluator) matrixIterSlice( var drop int for drop = 0; histograms[drop].T < mint; drop++ { } + // Rotate the buffer around the drop index so that points before mint can be + // reused to store new histograms. + tail := make([]HPoint, drop) + copy(tail, histograms[:drop]) copy(histograms, histograms[drop:]) + copy(histograms[len(histograms)-drop:], tail) histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. @@ -2079,21 +2084,27 @@ loop: case chunkenc.ValNone: break loop case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: - t, h := buf.AtFloatHistogram() - if value.IsStaleNaN(h.Sum) { - continue loop - } + t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. if t >= mintHistograms { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - point := HPoint{T: t, H: h} if histograms == nil { histograms = getHPointSlice(16) } - histograms = append(histograms, point) - ev.currentSamples += point.size() + n := len(histograms) + if n < cap(histograms) { + histograms = histograms[:n+1] + } else { + histograms = append(histograms, HPoint{}) + } + histograms[n].T, histograms[n].H = buf.AtFloatHistogram(histograms[n].H) + if value.IsStaleNaN(histograms[n].H.Sum) { + histograms = histograms[:n] + continue loop + } + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples += histograms[n].size() } case chunkenc.ValFloat: t, f := buf.At() @@ -2116,17 +2127,22 @@ loop: // The sought sample might also be in the range. switch soughtValueType { case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: - t, h := it.AtFloatHistogram() - if t == maxt && !value.IsStaleNaN(h.Sum) { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) + t := it.AtT() + if t == maxt { + _, h := it.AtFloatHistogram() + if !value.IsStaleNaN(h.Sum) { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if histograms == nil { + histograms = getHPointSlice(16) + } + // The last sample comes directly from the iterator, so we need to copy it to + // avoid having the same reference twice in the buffer. 
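The matrixIterSlice hunk above stops discarding the HPoints that fall out of the query window and instead rotates them into the slack end of the slice, so their already-allocated FloatHistograms stay reachable for reuse. A generic sketch of the rotation (names hypothetical, not from this patch):

// rotateOut drops buf[:n] from the logical slice but parks those elements
// (and the heap objects they point to) in the slack between the new length
// and the old one, where a later append within capacity can recycle them.
func rotateOut[T any](buf []T, n int) []T {
	tail := make([]T, n)
	copy(tail, buf[:n])          // save the elements being dropped
	copy(buf, buf[n:])           // shift the kept elements to the front
	copy(buf[len(buf)-n:], tail) // park the dropped elements at the end
	return buf[:len(buf)-n]      // shrink length; capacity keeps the parked ones
}

Growing back into that slack via histograms[:n+1] then lets the loop below overwrite a parked histogram in place with AtFloatHistogram instead of allocating. Conversely, the one histogram taken straight from the iterator at maxt is Copy()'d before entering the buffer (next hunk), because buffered points may now share recycled memory and must not alias iterator-owned objects.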
+ point := HPoint{T: t, H: h.Copy()} + histograms = append(histograms, point) + ev.currentSamples += point.size() } - if histograms == nil { - histograms = getHPointSlice(16) - } - point := HPoint{T: t, H: h} - histograms = append(histograms, point) - ev.currentSamples += point.size() } case chunkenc.ValFloat: t, f := it.At() diff --git a/promql/engine_test.go b/promql/engine_test.go index 60199671d6..09f102bed2 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -141,7 +141,7 @@ func TestQueryTimeout(t *testing.T) { require.Error(t, res.Err, "expected timeout error but got none") var e ErrQueryTimeout - require.True(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err) + require.ErrorAs(t, res.Err, &e, "expected timeout error but got: %s", res.Err) } const errQueryCanceled = ErrQueryCanceled("test statement execution") @@ -240,14 +240,14 @@ func TestQueryError(t *testing.T) { res := vectorQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") - require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") + require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match") matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0)) require.NoError(t, err) res = matrixQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") - require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") + require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match") } type noopHintRecordingQueryable struct { @@ -636,7 +636,7 @@ func TestEngineShutdown(t *testing.T) { require.Error(t, res2.Err, "expected error on querying with canceled context but got none") var e ErrQueryCanceled - require.True(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err) + require.ErrorAs(t, res2.Err, &e, "expected cancellation error but got: %s", res2.Err) } func TestEngineEvalStmtTimestamps(t *testing.T) { @@ -2058,7 +2058,7 @@ func TestQueryLogger_basic(t *testing.T) { l := len(f1.logs) queryExec() - require.Equal(t, 2*l, len(f1.logs)) + require.Len(t, f1.logs, 2*l) // Test that we close the query logger when unsetting it. require.False(t, f1.closed, "expected f1 to be open, got closed") @@ -3004,8 +3004,8 @@ func TestEngineOptsValidation(t *testing.T) { require.Equal(t, c.expError, err1) require.Equal(t, c.expError, err2) } else { - require.Nil(t, err1) - require.Nil(t, err2) + require.NoError(t, err1) + require.NoError(t, err2) } } } @@ -3190,28 +3190,75 @@ func TestNativeHistogramRate(t *testing.T) { } require.NoError(t, app.Commit()) - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.226666666666667, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. 
/ 15., 1. / 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) + queryString := fmt.Sprintf("rate(%s[45s])", seriesName) + t.Run("instant_query", func(t *testing.T) { + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + require.NoError(t, err) + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + vector, err := res.Vector() + require.NoError(t, err) + require.Len(t, vector, 1) + actualHistogram := vector[0].H + expectedHistogram := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. / 15., + Sum: 1.2266666666666663, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + } + require.Equal(t, expectedHistogram, actualHistogram) + }) + + t.Run("range_query", func(t *testing.T) { + step := 30 * time.Second + start := timestamp.Time(int64(5 * time.Minute / time.Millisecond)) + end := start.Add(step) + qry, err := engine.NewRangeQuery(context.Background(), storage, nil, queryString, start, end, step) + require.NoError(t, err) + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + matrix, err := res.Matrix() + require.NoError(t, err) + require.Len(t, matrix, 1) + require.Len(t, matrix[0].Histograms, 2) + actualHistograms := matrix[0].Histograms + expectedHistograms := []HPoint{{ + T: 300000, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. / 15., + Sum: 1.2266666666666663, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + }, + }, { + T: 330000, + H: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 9. / 15., + Sum: 1.2266666666666663, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. 
/ 15.}, + }, + }} + require.Equal(t, expectedHistograms, actualHistograms) + }) } func TestNativeFloatHistogramRate(t *testing.T) { @@ -3288,7 +3335,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h, nil) } @@ -3308,7 +3355,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, h.ToFloat().Count, vector[0].F) + require.Equal(t, h.ToFloat(nil).Count, vector[0].F) } else { require.Equal(t, float64(h.Count), vector[0].F) } @@ -3326,7 +3373,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, h.ToFloat().Sum, vector[0].F) + require.Equal(t, h.ToFloat(nil).Sum, vector[0].F) } else { require.Equal(t, h.Sum, vector[0].F) } @@ -3454,7 +3501,7 @@ func TestNativeHistogram_HistogramStdDevVar(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, tc.h, nil) } @@ -3699,7 +3746,7 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) } @@ -4130,7 +4177,7 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) } @@ -4293,7 +4340,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // Since we mutate h later, we need to create a copy here. var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } @@ -4303,7 +4350,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) // Since we mutate h later, we need to create a copy here. if floatHisto { - _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) } @@ -4551,7 +4598,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { // Since we mutate h later, we need to create a copy here. var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } @@ -4708,7 +4755,7 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) { // Since we mutate h later, we need to create a copy here. 
 			var err error
 			if floatHisto {
-				_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat())
+				_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
 			} else {
 				_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
 			}
diff --git a/promql/functions.go b/promql/functions.go
index 07e439cf25..407a11b50a 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -23,6 +23,7 @@ import (
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/model"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -367,6 +368,68 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
 	return Vector(byValueSorter), nil
 }
 
+// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
+func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	// Pre-sort by value so that, when all the given labels are equal, entries
+	// keep a deterministic value-based order (NaN sorts first in ascending order).
+	var anno annotations.Annotations
+	vals[0], anno = funcSort(vals, args, enh)
+	labels := stringSliceFromArgs(args[1:])
+	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
+		// Iterate over each given label
+		for _, label := range labels {
+			lv1 := a.Metric.Get(label)
+			lv2 := b.Metric.Get(label)
+			// 0 if a == b, -1 if a < b, and +1 if a > b.
+			switch strings.Compare(lv1, lv2) {
+			case -1:
+				return -1
+			case +1:
+				return +1
+			default:
+				continue
+			}
+		}
+
+		return 0
+	})
+
+	return vals[0].(Vector), anno
+}
+
+// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
+func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	// In case the labels are the same, NaN should sort to the bottom, so take
+	// ascending sort with NaN first and reverse it.
+	var anno annotations.Annotations
+	vals[0], anno = funcSortDesc(vals, args, enh)
+	labels := stringSliceFromArgs(args[1:])
+	slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
+		// Iterate over each given label
+		for _, label := range labels {
+			lv1 := a.Metric.Get(label)
+			lv2 := b.Metric.Get(label)
+			// If label values are the same, continue to the next label
+			if lv1 == lv2 {
+				continue
+			}
+			// 0 if a == b, -1 if a < b, and +1 if a > b.
+			switch strings.Compare(lv1, lv2) {
+			case -1:
+				return +1
+			case +1:
+				return -1
+			default:
+				continue
+			}
+		}
+
+		return 0
+	})
+
+	return vals[0].(Vector), anno
+}
+
 // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
 func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
@@ -546,6 +609,25 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	}), nil
 }
 
+// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
+func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
+	if len(vals[0].(Matrix)[0].Floats) == 0 {
+		return enh.Out, nil
+	}
+	return aggrOverTime(vals, enh, func(s Series) float64 {
+		values := make(vectorByValueHeap, 0, len(s.Floats))
+		for _, f := range s.Floats {
+			values = append(values, Sample{F: f.F})
+		}
+		median := quantile(0.5, values)
+		values = make(vectorByValueHeap, 0, len(s.Floats))
+		for _, f := range s.Floats {
+			values = append(values, Sample{F: math.Abs(f.F - median)})
+		}
+		return quantile(0.5, values)
+	}), nil
+}
+
 // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
 func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	if len(vals[0].(Matrix)[0].Floats) == 0 {
@@ -1113,7 +1195,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 	var histogramSamples []Sample
 
 	for _, sample := range inVec {
-		// We are only looking for conventional buckets here. Remember
+		// We are only looking for classic buckets here. Remember
 		// the histograms for later treatment.
 		if sample.H != nil {
 			histogramSamples = append(histogramSamples, sample)
@@ -1144,10 +1226,10 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 	// Now deal with the histograms.
 	for _, sample := range histogramSamples {
 		// We have to reconstruct the exact same signature as above for
-		// a conventional histogram, just ignoring any le label.
+		// a classic histogram, just ignoring any le label.
 		enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
 		if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 {
-			// At this data point, we have conventional histogram
+			// At this data point, we have classic histogram
 			// buckets and a native histogram with the same name and
 			// labels. Do not evaluate anything.
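One subtlety worth noting about the two sort functions above: when two samples tie on every given label the comparator returns 0, which only preserves the value-based pre-ordering if the second sort is stable, and slices.SortFunc is documented as not guaranteed stable (slices.SortStableFunc is the stable variant). A compact sketch of the same multi-key comparison outside the PromQL types, using hypothetical label maps:

import "strings"

// compareByKeys orders a and b by the first key on which they differ.
func compareByKeys(a, b map[string]string, keys []string) int {
	for _, k := range keys {
		if c := strings.Compare(a[k], b[k]); c != 0 {
			return c
		}
	}
	return 0 // equal on every key: order falls back to the pre-sort
}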
annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), args[1].PositionRange())) @@ -1475,6 +1557,7 @@ var FunctionCalls = map[string]FunctionCall{ "log10": funcLog10, "log2": funcLog2, "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, "max_over_time": funcMaxOverTime, "min_over_time": funcMinOverTime, "minute": funcMinute, @@ -1493,6 +1576,8 @@ var FunctionCalls = map[string]FunctionCall{ "sinh": funcSinh, "sort": funcSort, "sort_desc": funcSortDesc, + "sort_by_label": funcSortByLabel, + "sort_by_label_desc": funcSortByLabelDesc, "sqrt": funcSqrt, "stddev_over_time": funcStddevOverTime, "stdvar_over_time": funcStdvarOverTime, @@ -1637,3 +1722,11 @@ func stringFromArg(e parser.Expr) string { unwrapParenExpr(&tmp) // Optionally unwrap ParenExpr return tmp.(*parser.StringLiteral).Val } + +func stringSliceFromArgs(args parser.Expressions) []string { + tmp := make([]string, len(args)) + for i := 0; i < len(args); i++ { + tmp[i] = stringFromArg(args[i]) + } + return tmp +} diff --git a/promql/functions_test.go b/promql/functions_test.go index faf6859e77..6d5c3784ea 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -64,7 +64,7 @@ func TestDeriv(t *testing.T) { require.NoError(t, result.Err) vec, _ := result.Vector() - require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec)) + require.Len(t, vec, 1, "Expected 1 result, got %d", len(vec)) require.Equal(t, 0.0, vec[0].F, "Expected 0.0 as value, got %f", vec[0].F) } diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 8d9d92aa14..46d50d5476 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -254,6 +254,12 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "mad_over_time": { + Name: "mad_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Experimental: true, + }, "max_over_time": { Name: "max_over_time", ArgTypes: []ValueType{ValueTypeMatrix}, @@ -347,6 +353,20 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "sort_by_label": { + Name: "sort_by_label", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString}, + Variadic: -1, + ReturnType: ValueTypeVector, + Experimental: true, + }, + "sort_by_label_desc": { + Name: "sort_by_label_desc", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString}, + Variadic: -1, + ReturnType: ValueTypeVector, + Experimental: true, + }, "sqrt": { Name: "sqrt", ArgTypes: []ValueType{ValueTypeVector}, diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 4f602433dd..641bedcfcc 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -566,9 +566,8 @@ Loop: if l.peek() == ':' { l.emit(desc) return lexHistogram - } else { - l.errorf("missing `:` for histogram descriptor") } + l.errorf("missing `:` for histogram descriptor") } else { l.errorf("bad histogram descriptor found: %q", word) } diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 09d84fe6bc..122286c551 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -72,7 +72,7 @@ func WithFunctions(functions map[string]*Function) Opt { } // NewParser returns a new parser. -func NewParser(input string, opts ...Opt) *parser { +func NewParser(input string, opts ...Opt) *parser { //nolint:revive // unexported-return. 
p := parserPool.Get().(*parser) p.functions = Functions diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 3f90cc3c19..eac09996d5 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3604,9 +3604,9 @@ func TestParseExpressions(t *testing.T) { require.True(t, ok, "unexpected error type") for _, e := range errorList { - require.True(t, 0 <= e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e) - require.True(t, e.PositionRange.Start <= e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e) - require.True(t, e.PositionRange.End <= posrange.Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e) + require.LessOrEqual(t, 0, e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e) + require.LessOrEqual(t, e.PositionRange.Start, e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e) + require.LessOrEqual(t, e.PositionRange.End, posrange.Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e) } } }) diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index ce55fecbbb..4135753fd7 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -123,7 +123,7 @@ func TestMMapFile(t *testing.T) { bytes := make([]byte, 4) n, err := f.Read(bytes) - require.Equal(t, n, 2) + require.Equal(t, 2, n) require.NoError(t, err, "Unexpected error while reading file.") require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed") diff --git a/promql/test.go b/promql/test.go index 7274a8f0d9..589b1e5b6b 100644 --- a/promql/test.go +++ b/promql/test.go @@ -73,6 +73,9 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage { // RunBuiltinTests runs an acceptance test suite against the provided engine. func RunBuiltinTests(t *testing.T, engine engineQuerier) { + t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) + parser.EnableExperimentalFunctions = true + files, err := fs.Glob(testsFs, "*/*.test") require.NoError(t, err) diff --git a/promql/testdata/functions.test b/promql/testdata/functions.test index 02e6a32190..b4547886a7 100644 --- a/promql/testdata/functions.test +++ b/promql/testdata/functions.test @@ -469,6 +469,116 @@ eval_ordered instant at 50m sort_desc(http_requests) http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN +# Tests for sort_by_label/sort_by_label_desc. 
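The promql/test.go change above flips parser.EnableExperimentalFunctions for the whole built-in suite, since sort_by_label, sort_by_label_desc, and mad_over_time are all registered with Experimental: true and are rejected at parse time otherwise. A minimal usage sketch of that package-level switch, as used in the hunk:

parser.EnableExperimentalFunctions = true
defer func() { parser.EnableExperimentalFunctions = false }()

expr, err := parser.ParseExpr(`sort_by_label(http_requests, "instance")`)
if err != nil {
	// Without the flag this fails at parse time.
	panic(err)
}
_ = expr

The testdata for the new functions follows below.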
+clear +load 5m + http_requests{job="api-server", instance="0", group="production"} 0+10x10 + http_requests{job="api-server", instance="1", group="production"} 0+20x10 + http_requests{job="api-server", instance="0", group="canary"} 0+30x10 + http_requests{job="api-server", instance="1", group="canary"} 0+40x10 + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="app-server", instance="0", group="production"} 0+50x10 + http_requests{job="app-server", instance="1", group="production"} 0+60x10 + http_requests{job="app-server", instance="0", group="canary"} 0+70x10 + http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests{job="api-server", instance="2", group="production"} 0+10x10 + +eval_ordered instant at 50m sort_by_label(http_requests, "instance") + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="2", job="api-server"} 100 + http_requests{group="canary", instance="2", job="api-server"} NaN + +eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="production", instance="2", job="api-server"} 100 + +eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="production", instance="2", job="api-server"} 100 + +eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "job") + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="2", job="api-server"} NaN + 
http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="2", job="api-server"} 100 + +eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "group") + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="production", instance="2", job="api-server"} 100 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="1", job="app-server"} 600 + +eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance") + http_requests{group="production", instance="2", job="api-server"} 100 + http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="canary", instance="0", job="api-server"} 300 + http_requests{group="production", instance="0", job="api-server"} 100 + +eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group") + http_requests{group="production", instance="2", job="api-server"} 100 + http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="0", job="api-server"} 300 + +eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job") + http_requests{group="production", instance="2", job="api-server"} 100 + http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="production", instance="1", job="app-server"} 600 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="1", job="api-server"} 400 + http_requests{group="production", instance="0", job="app-server"} 500 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="0", job="api-server"} 300 + # Tests for holt_winters clear @@ -629,6 +739,14 
@@ eval instant at 1m stdvar_over_time(metric[1m]) eval instant at 1m stddev_over_time(metric[1m]) {} 0 +# Tests for mad_over_time. +clear +load 10s + metric 4 6 2 1 999 1 2 + +eval instant at 70s mad_over_time(metric[70s]) + {} 1 + # Tests for quantile_over_time clear diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 19d804a816..f13015f320 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -185,7 +185,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) - require.Equal(t, 0, len(res)) + require.Empty(t, res) } func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { @@ -828,7 +828,7 @@ func TestKeepFiringFor(t *testing.T) { evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) - require.Equal(t, 0, len(res)) + require.Empty(t, res) } func TestPendingAndKeepFiringFor(t *testing.T) { @@ -880,7 +880,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { evalTime := baseTime.Add(time.Minute) res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) - require.Equal(t, 0, len(res)) + require.Empty(t, res) } // TestAlertingEvalWithOrigin checks that the alerting rule details are passed through the context. diff --git a/rules/manager_test.go b/rules/manager_test.go index ed4b52b1c1..7c5f041778 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -169,7 +169,7 @@ func TestAlertingRule(t *testing.T) { filteredRes = append(filteredRes, smpl) } else { // If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'. - require.Equal(t, smplName, "ALERTS_FOR_STATE") + require.Equal(t, "ALERTS_FOR_STATE", smplName) } } for i := range test.result { @@ -337,7 +337,6 @@ func TestForStateAddSamples(t *testing.T) { for _, aa := range rule.ActiveAlerts() { require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) } - } }) } @@ -478,12 +477,12 @@ func TestForStateRestore(t *testing.T) { // Checking if we have restored it correctly. switch { case tst.noRestore: - require.Equal(t, tst.num, len(got)) + require.Len(t, got, tst.num) for _, e := range got { require.Equal(t, e.ActiveAt, restoreTime) } case tst.gracePeriod: - require.Equal(t, tst.num, len(got)) + require.Len(t, got, tst.num) for _, e := range got { require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) } @@ -740,7 +739,7 @@ func TestUpdate(t *testing.T) { err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil) require.NoError(t, err) - require.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups") + require.NotEmpty(t, ruleManager.groups, "expected non-empty rule groups") ogs := map[string]*Group{} for h, g := range ruleManager.groups { g.seriesInPreviousEval = []map[string]labels.Labels{ @@ -761,7 +760,7 @@ func TestUpdate(t *testing.T) { // Groups will be recreated if updated. 
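For the record, the expected `{} 1` in the mad_over_time test above works out as follows: the 70s window holds {4, 6, 2, 1, 999, 1, 2}, whose median is 2; the absolute deviations from that median are {2, 4, 0, 1, 997, 1, 0}, whose median is 1. The 999 outlier barely moves the result, which is exactly the robustness property that distinguishes the median absolute deviation from stddev_over_time on the same data.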
rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml") - require.Equal(t, 0, len(errs), "file parsing failures") + require.Empty(t, errs, "file parsing failures") tmpFile, err := os.CreateTemp("", "rules.test.*.yaml") require.NoError(t, err) @@ -1141,20 +1140,20 @@ func TestNotify(t *testing.T) { // Alert sent right away group.Eval(ctx, time.Unix(1, 0)) - require.Equal(t, 1, len(lastNotified)) + require.Len(t, lastNotified, 1) require.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero") // Alert is not sent 1s later group.Eval(ctx, time.Unix(2, 0)) - require.Equal(t, 0, len(lastNotified)) + require.Empty(t, lastNotified) // Alert is resent at t=5s group.Eval(ctx, time.Unix(5, 0)) - require.Equal(t, 1, len(lastNotified)) + require.Len(t, lastNotified, 1) // Resolution alert sent right away group.Eval(ctx, time.Unix(6, 0)) - require.Equal(t, 1, len(lastNotified)) + require.Len(t, lastNotified, 1) } func TestMetricsUpdate(t *testing.T) { @@ -1352,7 +1351,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) { require.NoError(t, err) ruleManager.Stop() stopped = true - require.True(t, time.Since(start) < 1*time.Second, "rule manager does not stop early") + require.Less(t, time.Since(start), 1*time.Second, "rule manager does not stop early") time.Sleep(5 * time.Second) require.Equal(t, 0, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine") } @@ -1857,9 +1856,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { require.Equal(t, labels.FromStrings("__name__", "sum:histogram_metric"), s.Labels()) - expHist := hists[0].ToFloat() + expHist := hists[0].ToFloat(nil) for _, h := range hists[1:] { - expHist = expHist.Add(h.ToFloat()) + expHist = expHist.Add(h.ToFloat(nil)) } it := s.Iterator(nil) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index c580a50510..43ee0fcecf 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -14,10 +14,18 @@ package scrape import ( + "bytes" "context" + "encoding/binary" "fmt" "math/rand" "strings" + "sync" + "testing" + + "github.com/gogo/protobuf/proto" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -50,6 +58,10 @@ func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.M return 0, nil } +func (a nopAppender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { + return 0, nil +} + func (a nopAppender) Commit() error { return nil } func (a nopAppender) Rollback() error { return nil } @@ -65,9 +77,19 @@ type histogramSample struct { fh *histogram.FloatHistogram } +type collectResultAppendable struct { + *collectResultAppender +} + +func (a *collectResultAppendable) Appender(_ context.Context) storage.Appender { + return a +} + // collectResultAppender records all samples that were added through the appender. // It can be used as its zero value or be backed by another appender it writes samples through. 
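The mechanical assertion rewrites running through these test files (notifier, parser, rules, and scrape alike) come from the testifylint rules enabled in .golangci.yml: dedicated helpers replace generic require.Equal forms and produce clearer failure output. A minimal sketch of the pattern, using a hypothetical helper rather than code from this patch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func assertDrained(t *testing.T, queue []int, found bool) {
	t.Helper()
	// Before (flagged by testifylint): require.Equal(t, 0, len(queue))
	// and require.Equal(t, true, found).
	require.Empty(t, queue) // "empty" checker
	require.True(t, found)  // "bool-compare" checker
}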
type collectResultAppender struct { + mtx sync.Mutex + next storage.Appender resultFloats []floatSample pendingFloats []floatSample @@ -82,6 +104,8 @@ type collectResultAppender struct { } func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingFloats = append(a.pendingFloats, floatSample{ metric: lset, t: t, @@ -103,6 +127,8 @@ func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels } func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingExemplars = append(a.pendingExemplars, e) if a.next == nil { return 0, nil @@ -112,6 +138,8 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L } func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) if a.next == nil { return 0, nil @@ -121,6 +149,8 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels. } func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingMetadata = append(a.pendingMetadata, m) if ref == 0 { ref = storage.SeriesRef(rand.Uint64()) @@ -132,7 +162,13 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L return a.next.UpdateMetadata(ref, l, m) } +func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return a.Append(ref, l, ct, 0.0) +} + func (a *collectResultAppender) Commit() error { + a.mtx.Lock() + defer a.mtx.Unlock() a.resultFloats = append(a.resultFloats, a.pendingFloats...) a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) @@ -148,6 +184,8 @@ func (a *collectResultAppender) Commit() error { } func (a *collectResultAppender) Rollback() error { + a.mtx.Lock() + defer a.mtx.Unlock() a.rolledbackFloats = a.pendingFloats a.rolledbackHistograms = a.pendingHistograms a.pendingFloats = nil @@ -171,3 +209,22 @@ func (a *collectResultAppender) String() string { } return sb.String() } + +// protoMarshalDelimited marshals a MetricFamily into a delimited +// Prometheus proto exposition format bytes (known as 'encoding=delimited`) +// +// See also https://eli.thegreenplace.net/2011/08/02/length-prefix-framing-for-protocol-buffers +func protoMarshalDelimited(t *testing.T, mf *dto.MetricFamily) []byte { + t.Helper() + + protoBuf, err := proto.Marshal(mf) + require.NoError(t, err) + + varintBuf := make([]byte, binary.MaxVarintLen32) + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + + buf := &bytes.Buffer{} + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + return buf.Bytes() +} diff --git a/scrape/manager.go b/scrape/manager.go index 3b70e48a13..3ad315a50a 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -78,9 +78,15 @@ type Options struct { EnableMetadataStorage bool // Option to increase the interval used by scrape manager to throttle target groups updates. 
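protoMarshalDelimited above produces one length-prefixed frame: a uvarint byte count followed by the encoded MetricFamily, which is what `encoding=delimited` means in the Content-Type it is paired with. A sketch of the matching reader, using the helper's imports plus encoding/binary and errors; a real scrape body concatenates many such frames:

func readDelimited(buf []byte) (*dto.MetricFamily, []byte, error) {
	msgLen, n := binary.Uvarint(buf)
	if n <= 0 {
		return nil, nil, errors.New("invalid uvarint length prefix")
	}
	end := n + int(msgLen)
	if end > len(buf) {
		return nil, nil, errors.New("truncated frame")
	}
	mf := &dto.MetricFamily{}
	if err := proto.Unmarshal(buf[n:end], mf); err != nil {
		return nil, nil, err
	}
	return mf, buf[end:], nil // remainder starts the next frame, if any
}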
DiscoveryReloadInterval model.Duration + // Option to enable the ingestion of the created timestamp as a synthetic zero sample. + // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md + EnableCreatedTimestampZeroIngestion bool // Optional HTTP client options to use when scraping. HTTPClientOptions []config_util.HTTPClientOption + + // private option for testability. + skipOffsetting bool } // Manager maintains a set of scrape pools and manages start/stop cycles @@ -282,24 +288,10 @@ func (m *Manager) TargetsActive() map[string][]*Target { m.mtxScrape.Lock() defer m.mtxScrape.Unlock() - var ( - wg sync.WaitGroup - mtx sync.Mutex - ) - targets := make(map[string][]*Target, len(m.scrapePools)) - wg.Add(len(m.scrapePools)) for tset, sp := range m.scrapePools { - // Running in parallel limits the blocking time of scrapePool to scrape - // interval when there's an update from SD. - go func(tset string, sp *scrapePool) { - mtx.Lock() - targets[tset] = sp.ActiveTargets() - mtx.Unlock() - wg.Done() - }(tset, sp) + targets[tset] = sp.ActiveTargets() } - wg.Wait() return targets } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index a689c469d4..524424269e 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -15,14 +15,23 @@ package scrape import ( "context" + "fmt" "net/http" + "net/http/httptest" + "net/url" + "os" "strconv" + "sync" "testing" "time" + "github.com/go-kit/log" + "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -30,6 +39,7 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/util/runutil" ) func TestPopulateLabels(t *testing.T) { @@ -714,3 +724,146 @@ scrape_configs: reload(scrapeManager, cfg2) require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) } + +// TestManagerCTZeroIngestion tests scrape manager for CT cases. +func TestManagerCTZeroIngestion(t *testing.T) { + const mName = "expected_counter" + + for _, tc := range []struct { + name string + counterSample *dto.Counter + enableCTZeroIngestion bool + + expectedValues []float64 + }{ + { + name: "disabled with CT on counter", + counterSample: &dto.Counter{ + Value: proto.Float64(1.0), + // Timestamp does not matter as long as it exists in this test. + CreatedTimestamp: timestamppb.Now(), + }, + expectedValues: []float64{1.0}, + }, + { + name: "enabled with CT on counter", + counterSample: &dto.Counter{ + Value: proto.Float64(1.0), + // Timestamp does not matter as long as it exists in this test. 
+				CreatedTimestamp: timestamppb.Now(),
+			},
+			enableCTZeroIngestion: true,
+			expectedValues:        []float64{0.0, 1.0},
+		},
+		{
+			name: "enabled without CT on counter",
+			counterSample: &dto.Counter{
+				Value: proto.Float64(1.0),
+			},
+			enableCTZeroIngestion: true,
+			expectedValues:        []float64{1.0},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			app := &collectResultAppender{}
+			scrapeManager, err := NewManager(
+				&Options{
+					EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion,
+					skipOffsetting:                      true,
+				},
+				log.NewLogfmtLogger(os.Stderr),
+				&collectResultAppendable{app},
+				prometheus.NewRegistry(),
+			)
+			require.NoError(t, err)
+
+			require.NoError(t, scrapeManager.ApplyConfig(&config.Config{
+				GlobalConfig: config.GlobalConfig{
+					// Disable regular scrapes.
+					ScrapeInterval: model.Duration(9999 * time.Minute),
+					ScrapeTimeout:  model.Duration(5 * time.Second),
+					// Ensure the proto is chosen. We need proto as it's the only protocol
+					// with the CT parsing support.
+					ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto},
+				},
+				ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}},
+			}))
+
+			once := sync.Once{}
+			// Start a fake HTTP target that allows exactly one scrape.
+			server := httptest.NewServer(
+				http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					fail := true
+					once.Do(func() {
+						fail = false
+						w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
+
+						ctrType := dto.MetricType_COUNTER
+						w.Write(protoMarshalDelimited(t, &dto.MetricFamily{
+							Name:   proto.String(mName),
+							Type:   &ctrType,
+							Metric: []*dto.Metric{{Counter: tc.counterSample}},
+						}))
+					})
+
+					if fail {
+						w.WriteHeader(http.StatusInternalServerError)
+					}
+				}),
+			)
+			defer server.Close()
+
+			serverURL, err := url.Parse(server.URL)
+			require.NoError(t, err)
+
+			// Add the fake target directly into tsets + reload. Normally users would use
+			// Manager.Run and wait for the minimum 5s refresh interval.
+			scrapeManager.updateTsets(map[string][]*targetgroup.Group{
+				"test": {{
+					Targets: []model.LabelSet{{
+						model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
+						model.AddressLabel: model.LabelValue(serverURL.Host),
+					}},
+				}},
+			})
+			scrapeManager.reload()
+
+			// Wait for one scrape.
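+			// runutil.Retry below polls the condition until it returns nil or the
+			// stop channel fires; the assumed contract is roughly
+			// Retry(interval time.Duration, stopc <-chan struct{}, f func() error) error.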
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + if countFloatSamples(app, mName) != len(tc.expectedValues) { + return fmt.Errorf("expected %v samples", tc.expectedValues) + } + return nil + }), "after 1 minute") + scrapeManager.Stop() + + require.Equal(t, tc.expectedValues, getResultFloats(app, mName)) + }) + } +} + +func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) { + a.mtx.Lock() + defer a.mtx.Unlock() + + for _, f := range a.resultFloats { + if f.metric.Get(model.MetricNameLabel) == expectedMetricName { + count++ + } + } + return count +} + +func getResultFloats(app *collectResultAppender, expectedMetricName string) (result []float64) { + app.mtx.Lock() + defer app.mtx.Unlock() + + for _, f := range app.resultFloats { + if f.metric.Get(model.MetricNameLabel) == expectedMetricName { + result = append(result, f.f) + } + } + return result +} diff --git a/scrape/metrics.go b/scrape/metrics.go index d74143185b..7082bc743b 100644 --- a/scrape/metrics.go +++ b/scrape/metrics.go @@ -286,8 +286,8 @@ func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) { for tset, targets := range mc.TargetsGatherer.TargetsActive() { var size, length int for _, t := range targets { - size += t.MetadataSize() - length += t.MetadataLength() + size += t.SizeMetadata() + length += t.LengthMetadata() } ch <- prometheus.MustNewConstMetric( diff --git a/scrape/scrape.go b/scrape/scrape.go index 9a0ba1d009..2518912a7a 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -106,9 +106,10 @@ type scrapeLoopOptions struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool - mrc []*relabel.Config - cache *scrapeCache - enableCompression bool + + mrc []*relabel.Config + cache *scrapeCache + enableCompression bool } const maxAheadTime = 10 * time.Minute @@ -168,11 +169,13 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.interval, opts.timeout, opts.scrapeClassicHistograms, + options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, options.EnableMetadataStorage, opts.target, options.PassMetadataInContext, metrics, + options.skipOffsetting, ) } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -672,7 +675,7 @@ func acceptHeader(sps []config.ScrapeProtocol) string { weight-- } // Default match anything. - vals = append(vals, fmt.Sprintf("*/*;q=%d", weight)) + vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight)) return strings.Join(vals, ",") } @@ -787,6 +790,7 @@ type scrapeLoop struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool + enableCTZeroIngestion bool appender func(ctx context.Context) storage.Appender sampleMutator labelsMutator @@ -804,6 +808,8 @@ type scrapeLoop struct { appendMetadataToWAL bool metrics *scrapeMetrics + + skipOffsetting bool // For testability. 
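+
+	// skipOffsetting makes run() start scraping immediately instead of waiting
+	// for the per-target offset; it is only set by tests through
+	// Options.skipOffsetting.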
} // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -955,12 +961,12 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) { } } -func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) { +func (c *scrapeCache) setType(metric []byte, t model.MetricType) { c.metaMtx.Lock() e, ok := c.metadata[string(metric)] if !ok { - e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} + e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(metric)] = e } if e.Type != t { @@ -977,7 +983,7 @@ func (c *scrapeCache) setHelp(metric, help []byte) { e, ok := c.metadata[string(metric)] if !ok { - e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} + e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(metric)] = e } if e.Help != string(help) { @@ -994,7 +1000,7 @@ func (c *scrapeCache) setUnit(metric, unit []byte) { e, ok := c.metadata[string(metric)] if !ok { - e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} + e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}} c.metadata[string(metric)] = e } if e.Unit != string(unit) { @@ -1076,11 +1082,13 @@ func newScrapeLoop(ctx context.Context, interval time.Duration, timeout time.Duration, scrapeClassicHistograms bool, + enableCTZeroIngestion bool, reportExtraMetrics bool, appendMetadataToWAL bool, target *Target, passMetadataInContext bool, metrics *scrapeMetrics, + skipOffsetting bool, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1124,9 +1132,11 @@ func newScrapeLoop(ctx context.Context, interval: interval, timeout: timeout, scrapeClassicHistograms: scrapeClassicHistograms, + enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, appendMetadataToWAL: appendMetadataToWAL, metrics: metrics, + skipOffsetting: skipOffsetting, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1134,12 +1144,14 @@ func newScrapeLoop(ctx context.Context, } func (sl *scrapeLoop) run(errc chan<- error) { - select { - case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): - // Continue after a scraping offset. - case <-sl.ctx.Done(): - close(sl.stopped) - return + if !sl.skipOffsetting { + select { + case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): + // Continue after a scraping offset. + case <-sl.ctx.Done(): + close(sl.stopped) + return + } } var last time.Time @@ -1557,6 +1569,15 @@ loop: updateMetadata(lset, true) } + if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. + // CT is an experimental feature. For now, we don't need to fail the + // scrape on errors updating the created timestamp, log debug. 
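+				// storage.ErrOutOfOrderCT is excluded above because a CT that is
+				// older than the newest sample of the series is a common, expected
+				// case (see the ErrOutOfOrderCT docs in storage/interface.go).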
+ level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + } + } + if isHistogram { if h != nil { ref, err = app.AppendHistogram(ref, lset, t, h, nil) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 5b61a8948e..1a416eeb6f 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -111,14 +111,14 @@ func TestDroppedTargetsList(t *testing.T) { ) sp.Sync(tgs) sp.Sync(tgs) - require.Equal(t, expectedLength, len(sp.droppedTargets)) + require.Len(t, sp.droppedTargets, expectedLength) require.Equal(t, expectedLength, sp.droppedTargetsCount) require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String()) // Check that count is still correct when we don't retain all dropped targets. sp.config.KeepDroppedTargets = 1 sp.Sync(tgs) - require.Equal(t, 1, len(sp.droppedTargets)) + require.Len(t, sp.droppedTargets, 1) require.Equal(t, expectedLength, sp.droppedTargetsCount) } @@ -242,11 +242,11 @@ func TestScrapePoolStop(t *testing.T) { } mtx.Lock() - require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops") + require.Len(t, stopped, numTargets, "Unexpected number of stopped loops") mtx.Unlock() - require.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets)) - require.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops)) + require.Empty(t, sp.activeTargets, "Targets were not cleared on stopping: %d left", len(sp.activeTargets)) + require.Empty(t, sp.loops, "Loops were not cleared on stopping: %d left", len(sp.loops)) } func TestScrapePoolReload(t *testing.T) { @@ -333,11 +333,11 @@ func TestScrapePoolReload(t *testing.T) { } mtx.Lock() - require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops") + require.Len(t, stopped, numTargets, "Unexpected number of stopped loops") mtx.Unlock() require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly") - require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload") + require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload") } func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { @@ -437,10 +437,10 @@ func TestScrapePoolTargetLimit(t *testing.T) { for _, l := range sp.loops { lerr := l.(*testLoop).getForcedError() if shouldErr { - require.NotNil(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) + require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error()) } else { - require.Equal(t, nil, lerr) + require.NoError(t, lerr) } } } @@ -582,8 +582,8 @@ func TestScrapePoolRaces(t *testing.T) { dropped := sp.DroppedTargets() expectedActive, expectedDropped := len(tgts[0].Targets), 0 - require.Equal(t, expectedActive, len(active), "Invalid number of active targets") - require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets") + require.Len(t, active, expectedActive, "Invalid number of active targets") + require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets") for i := 0; i < 20; i++ { time.Sleep(10 * time.Millisecond) @@ -633,7 +633,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { })) sp.Sync(tgs) - require.Equal(t, 1, len(sp.loops)) + require.Len(t, sp.loops, 1) 
wg.Wait() for _, l := range sp.loops { @@ -660,9 +660,11 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app false, false, false, + false, nil, false, newTestScrapeMetrics(t), + false, ) } @@ -801,9 +803,11 @@ func TestScrapeLoopRun(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, + false, ) // The loop must terminate during the initial offset if the context @@ -945,9 +949,11 @@ func TestScrapeLoopMetadata(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, + false, ) defer cancel() @@ -965,19 +971,19 @@ test_metric 1 md, ok := cache.GetMetadata("test_metric") require.True(t, ok, "expected metadata to be present") - require.Equal(t, textparse.MetricTypeCounter, md.Type, "unexpected metric type") + require.Equal(t, model.MetricTypeCounter, md.Type, "unexpected metric type") require.Equal(t, "some help text", md.Help) require.Equal(t, "metric", md.Unit) md, ok = cache.GetMetadata("test_metric_no_help") require.True(t, ok, "expected metadata to be present") - require.Equal(t, textparse.MetricTypeGauge, md.Type, "unexpected metric type") + require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type") require.Equal(t, "", md.Help) require.Equal(t, "", md.Unit) md, ok = cache.GetMetadata("test_metric_no_type") require.True(t, ok, "expected metadata to be present") - require.Equal(t, textparse.MetricTypeUnknown, md.Type, "unexpected metric type") + require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type") require.Equal(t, "other help text", md.Help) require.Equal(t, "", md.Unit) } @@ -1123,7 +1129,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) @@ -1170,7 +1176,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 17, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 17, "Appended samples not as expected:\n%s", appender) require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) @@ -1237,7 +1243,7 @@ func TestScrapeLoopCache(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. 
- require.Equal(t, 26, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 26, "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { @@ -1624,6 +1630,29 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { msg, err = MetricFamilyToProtobuf(histogramMetricFamily) require.NoError(t, err) + now = time.Now() + total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) + require.NoError(t, err) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 3, seriesAdded) + + err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric) + require.NoError(t, err) + metricValue = metric.GetCounter().GetValue() + require.Equal(t, beforeMetricValue, metricValue) + beforeMetricValue = metricValue + + nativeHistogram.WithLabelValues("L").Observe(100000.0) // in different bucket since > 10*1.1 + + gathered, err = registry.Gather() + require.NoError(t, err) + require.NotEmpty(t, gathered) + + histogramMetricFamily = gathered[0] + msg, err = MetricFamilyToProtobuf(histogramMetricFamily) + require.NoError(t, err) + now = time.Now() total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now) if !errors.Is(err, errBucketLimit) { @@ -2354,7 +2383,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { runTest(acceptHeader(config.DefaultScrapeProtocols)) protobufParsing = true - runTest(acceptHeader(config.DefaultNativeHistogramScrapeProtocols)) + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols)) } func TestTargetScrapeScrapeCancel(t *testing.T) { @@ -2506,7 +2535,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, &buf) require.NoError(t, err) - require.Equal(t, len(responseBody), buf.Len()) + require.Len(t, responseBody, buf.Len()) // Target response gzip compressed body, scrape without body size limit. gzipResponse = true buf.Reset() @@ -2514,7 +2543,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, &buf) require.NoError(t, err) - require.Equal(t, len(responseBody), buf.Len()) + require.Len(t, responseBody, buf.Len()) } // testScraper implements the scraper interface and allows setting values @@ -2619,7 +2648,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { q, err := s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) - require.Equal(t, false, series.Next(), "series found in tsdb") + require.False(t, series.Next(), "series found in tsdb") require.NoError(t, series.Err()) // We add a good metric to check that it is recorded. 
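The assertions above follow a pattern these tests repeat: open a querier over the storage, select series by matcher, and walk the series set. A minimal sketch of that pattern, assuming the same storage and labels packages already imported by this file (countSeries is a hypothetical helper, not part of this patch):

func countSeries(ctx context.Context, t *testing.T, s storage.Storage) (n int) {
	// Querier takes (mint, maxt) in this version; the context is passed to Select.
	q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
	require.NoError(t, err)
	defer q.Close()

	// Match every series by name and count the results.
	series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
	for series.Next() {
		n++
	}
	require.NoError(t, series.Err())
	return n
}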
@@ -2631,9 +2660,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { q, err = s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500")) - require.Equal(t, true, series.Next(), "series not found in tsdb") + require.True(t, series.Next(), "series not found in tsdb") require.NoError(t, series.Err()) - require.Equal(t, false, series.Next(), "more than one series found in tsdb") + require.False(t, series.Next(), "more than one series found in tsdb") } func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { @@ -2661,7 +2690,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { q, err := s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) - require.Equal(t, false, series.Next(), "series found in tsdb") + require.False(t, series.Next(), "series found in tsdb") require.NoError(t, series.Err()) } @@ -2721,14 +2750,14 @@ func TestReusableConfig(t *testing.T) { } for i, m := range match { - require.Equal(t, true, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i) - require.Equal(t, true, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i) - require.Equal(t, true, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i) - require.Equal(t, true, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i) + require.True(t, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i) + require.True(t, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i) + require.True(t, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i) + require.True(t, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i) } for i, m := range noMatch { - require.Equal(t, false, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i) - require.Equal(t, false, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i) + require.False(t, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i) + require.False(t, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i) } } @@ -3294,7 +3323,7 @@ test_summary_count 199 Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, }, }) - require.Equal(t, 1, len(sp.ActiveTargets())) + require.Len(t, sp.ActiveTargets(), 1) select { case <-time.After(5 * time.Second): @@ -3371,7 +3400,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. 
Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) @@ -3426,7 +3455,7 @@ func TestScrapeLoopCompression(t *testing.T) { Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, }, }) - require.Equal(t, 1, len(sp.ActiveTargets())) + require.Len(t, sp.ActiveTargets(), 1) select { case <-time.After(5 * time.Second): diff --git a/scrape/target.go b/scrape/target.go index 8cc8597a4e..0605f53490 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -30,7 +30,6 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" ) @@ -87,12 +86,12 @@ type MetricMetadataStore interface { // MetricMetadata is a piece of metadata for a metric. type MetricMetadata struct { Metric string - Type textparse.MetricType + Type model.MetricType Help string Unit string } -func (t *Target) MetadataList() []MetricMetadata { +func (t *Target) ListMetadata() []MetricMetadata { t.mtx.RLock() defer t.mtx.RUnlock() @@ -102,7 +101,7 @@ func (t *Target) MetadataList() []MetricMetadata { return t.metadata.ListMetadata() } -func (t *Target) MetadataSize() int { +func (t *Target) SizeMetadata() int { t.mtx.RLock() defer t.mtx.RUnlock() @@ -113,7 +112,7 @@ func (t *Target) MetadataSize() int { return t.metadata.SizeMetadata() } -func (t *Target) MetadataLength() int { +func (t *Target) LengthMetadata() int { t.mtx.RLock() defer t.mtx.RUnlock() @@ -124,8 +123,8 @@ func (t *Target) MetadataLength() int { return t.metadata.LengthMetadata() } -// Metadata returns type and help metadata for the given metric. -func (t *Target) Metadata(metric string) (MetricMetadata, bool) { +// GetMetadata returns type and help metadata for the given metric. 
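+// A typical call looks like (sketch; the metric name is hypothetical):
+//
+//	if md, ok := target.GetMetadata("http_requests_total"); ok {
+//		fmt.Println(md.Type, md.Help, md.Unit)
+//	}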
+func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) { t.mtx.RLock() defer t.mtx.RUnlock() @@ -366,13 +365,19 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - return 0, errBucketLimit + for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { + if h.Schema == -4 { + return 0, errBucketLimit + } + h = h.ReduceResolution(h.Schema - 1) } } if fh != nil { - if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - return 0, errBucketLimit + for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { + if fh.Schema == -4 { + return 0, errBucketLimit + } + fh = fh.ReduceResolution(fh.Schema - 1) } } ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) diff --git a/scrape/target_test.go b/scrape/target_test.go index 4f0c840cd0..f37c75a769 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -507,10 +507,25 @@ func TestBucketLimitAppender(t *testing.T) { NegativeBuckets: []int64{3, 0, 0}, } + bigGap := histogram.Histogram{ + Schema: 0, + Count: 21, + Sum: 33, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 1}, // in (1, 2] + {Offset: 2, Length: 1}, // in (8, 16] + }, + PositiveBuckets: []int64{1, 0}, // 1, 1 + } + cases := []struct { - h histogram.Histogram - limit int - expectError bool + h histogram.Histogram + limit int + expectError bool + expectBucketCount int + expectSchema int32 }{ { h: example, @@ -518,9 +533,25 @@ func TestBucketLimitAppender(t *testing.T) { expectError: true, }, { - h: example, - limit: 10, - expectError: false, + h: example, + limit: 4, + expectError: false, + expectBucketCount: 4, + expectSchema: -1, + }, + { + h: example, + limit: 10, + expectError: false, + expectBucketCount: 6, + expectSchema: 0, + }, + { + h: bigGap, + limit: 1, + expectError: false, + expectBucketCount: 1, + expectSchema: -2, }, } @@ -531,18 +562,28 @@ func TestBucketLimitAppender(t *testing.T) { t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { app := &bucketLimitAppender{Appender: resApp, limit: c.limit} ts := int64(10 * time.Minute / time.Millisecond) - h := c.h lbls := labels.FromStrings("__name__", "sparse_histogram_series") var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + fh := c.h.Copy().ToFloat(nil) + _, err = app.AppendHistogram(0, lbls, ts, nil, fh) + if c.expectError { + require.Error(t, err) + } else { + require.Equal(t, c.expectSchema, fh.Schema) + require.Equal(t, c.expectBucketCount, len(fh.NegativeBuckets)+len(fh.PositiveBuckets)) + require.NoError(t, err) + } } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - if c.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) + h := c.h.Copy() + _, err = app.AppendHistogram(0, lbls, ts, h, nil) + if c.expectError { + require.Error(t, err) + } else { + require.Equal(t, c.expectSchema, h.Schema) + require.Equal(t, c.expectBucketCount, len(h.NegativeBuckets)+len(h.PositiveBuckets)) + require.NoError(t, err) + } } require.NoError(t, app.Commit()) }) diff --git a/scripts/check-go-mod-version.sh b/scripts/check-go-mod-version.sh new file mode 100755 index 0000000000..d651a62036 --- /dev/null +++ b/scripts/check-go-mod-version.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + 
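+# Fails (exit 1) unless every go.mod found in the repository declares the same
+# Go version in its "go" directive.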
+readarray -t mod_files < <(find . -type f -name go.mod) + +echo "Checking files ${mod_files[@]}" + +matches=$(awk '$1 == "go" {print $2}' "${mod_files[@]}" | sort -u | wc -l) + +if [[ "${matches}" -ne 1 ]]; then + echo 'Not all go.mod files have matching go versions' + exit 1 +fi diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index ffa6b3090e..4b292229de 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -12,8 +12,14 @@ on: - ".golangci.yml" pull_request: +permissions: # added using https://github.com/step-security/secure-repo + contents: read + jobs: golangci: + permissions: + contents: read # for actions/checkout to fetch code + pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: lint runs-on: ubuntu-latest steps: @@ -29,4 +35,4 @@ jobs: - name: Lint uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 with: - version: v1.54.2 + version: v1.55.2 diff --git a/storage/buffer.go b/storage/buffer.go index d2d89e0425..d19f841d43 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -74,7 +74,7 @@ func (b *BufferedSeriesIterator) PeekBack(n int) (sample chunks.Sample, ok bool) // Buffer returns an iterator over the buffered data. Invalidates previously // returned iterators. -func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator { +func (b *BufferedSeriesIterator) Buffer() *SampleRingIterator { return b.buf.iterator() } @@ -202,7 +202,7 @@ func (s hSample) H() *histogram.Histogram { } func (s hSample) FH() *histogram.FloatHistogram { - return s.h.ToFloat() + return s.h.ToFloat(nil) } func (s hSample) Type() chunkenc.ValueType { @@ -252,7 +252,7 @@ type sampleRing struct { f int // Position of first element in ring buffer. l int // Number of elements in buffer. - it sampleRingIterator + it SampleRingIterator } type bufType int @@ -304,13 +304,15 @@ func (r *sampleRing) reset() { } // Returns the current iterator. Invalidates previously returned iterators. -func (r *sampleRing) iterator() chunkenc.Iterator { +func (r *sampleRing) iterator() *SampleRingIterator { r.it.r = r r.it.i = -1 return &r.it } -type sampleRingIterator struct { +// SampleRingIterator is returned by BufferedSeriesIterator.Buffer() and can be +// used to iterate samples buffered in the lookback window. +type SampleRingIterator struct { r *sampleRing i int t int64 @@ -319,7 +321,7 @@ type sampleRingIterator struct { fh *histogram.FloatHistogram } -func (it *sampleRingIterator) Next() chunkenc.ValueType { +func (it *SampleRingIterator) Next() chunkenc.ValueType { it.i++ if it.i >= it.r.l { return chunkenc.ValNone @@ -358,30 +360,28 @@ func (it *sampleRingIterator) Next() chunkenc.ValueType { } } -func (it *sampleRingIterator) Seek(int64) chunkenc.ValueType { - return chunkenc.ValNone -} - -func (it *sampleRingIterator) Err() error { - return nil -} - -func (it *sampleRingIterator) At() (int64, float64) { +// At returns the current float element of the iterator. +func (it *SampleRingIterator) At() (int64, float64) { return it.t, it.f } -func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { +// AtHistogram returns the current histogram element of the iterator. +func (it *SampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { return it.t, it.h } -func (it *sampleRingIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { +// AtFloatHistogram returns the current histogram element of the iterator. 
If the +// current sample is an integer histogram, it will be converted to a float histogram. +// An optional histogram.FloatHistogram can be provided to avoid allocating a new +// object for the conversion. +func (it *SampleRingIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { if it.fh == nil { - return it.t, it.h.ToFloat() + return it.t, it.h.ToFloat(fh) } return it.t, it.fh } -func (it *sampleRingIterator) AtT() int64 { +func (it *SampleRingIterator) AtT() int64 { return it.t } diff --git a/storage/buffer_test.go b/storage/buffer_test.go index c2542f3d9c..12e6ff0f05 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -233,7 +233,7 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(2) it := NewBufferIterator(NewListSeriesIterator(samples{ - fhSample{t: 1, fh: histograms[0].ToFloat()}, + fhSample{t: 1, fh: histograms[0].ToFloat(nil)}, hSample{t: 2, h: histograms[1]}, }), 2) @@ -243,12 +243,12 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { buf := it.Buffer() require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) - _, fh := buf.AtFloatHistogram() - require.Equal(t, histograms[0].ToFloat(), fh) + _, fh := buf.AtFloatHistogram(nil) + require.Equal(t, histograms[0].ToFloat(nil), fh) require.Equal(t, chunkenc.ValHistogram, buf.Next()) - _, fh = buf.AtFloatHistogram() - require.Equal(t, histograms[1].ToFloat(), fh) + _, fh = buf.AtFloatHistogram(nil) + require.Equal(t, histograms[1].ToFloat(nil), fh) } func BenchmarkBufferedSeriesIterator(b *testing.B) { diff --git a/storage/fanout.go b/storage/fanout.go index 33257046f2..a9a3f904b6 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -202,6 +202,20 @@ func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metada return ref, nil } +func (f *fanoutAppender) AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error) { + ref, err := f.primary.AppendCTZeroSample(ref, l, t, ct) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendCTZeroSample(ref, l, t, ct); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) Commit() (err error) { err = f.primary.Commit() diff --git a/storage/fanout_test.go b/storage/fanout_test.go index a99c2f803d..913e2fe24e 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -100,7 +100,7 @@ func TestFanout_SelectSorted(t *testing.T) { } require.Equal(t, labelsResult, outputLabel) - require.Equal(t, inputTotalSize, len(result)) + require.Len(t, result, inputTotalSize) }) t.Run("chunk querier", func(t *testing.T) { querier, err := fanoutStorage.ChunkQuerier(0, 8000) @@ -128,7 +128,7 @@ func TestFanout_SelectSorted(t *testing.T) { require.NoError(t, seriesSet.Err()) require.Equal(t, labelsResult, outputLabel) - require.Equal(t, inputTotalSize, len(result)) + require.Len(t, result, inputTotalSize) }) } @@ -178,7 +178,7 @@ func TestFanoutErrors(t *testing.T) { } if tc.warning != nil { - require.Greater(t, len(ss.Warnings()), 0, "warnings expected") + require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() require.Error(t, w.AsErrors()[0]) require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0]) @@ -204,7 +204,7 @@ func TestFanoutErrors(t *testing.T) { } if tc.warning != nil { - require.Greater(t, len(ss.Warnings()), 0, "warnings expected") + require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() 
 			require.Error(t, w.AsErrors()[0])
 			require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
diff --git a/storage/interface.go b/storage/interface.go
index 3e6cd0a533..240c38ddc5 100644
--- a/storage/interface.go
+++ b/storage/interface.go
@@ -43,6 +43,13 @@ var (
 	ErrExemplarLabelLength      = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
 	ErrExemplarsDisabled        = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
 	ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled")
+
+	// ErrOutOfOrderCT indicates a failed append of a CT to the storage,
+	// due to the CT being older than the most recent sample of the series.
+	// NOTE(bwplotka): This can be either an instrumentation failure or commonly expected
+	// behaviour, and we currently don't have a way to determine this. As a result
+	// it's recommended to ignore this error for now.
+	ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring")
 )
 
 // SeriesRef is a generic series reference. In prometheus it is either a
@@ -240,6 +247,7 @@ type Appender interface {
 	ExemplarAppender
 	HistogramAppender
 	MetadataUpdater
+	CreatedTimestampAppender
 }
 
 // GetRef is an extra interface on Appenders used by downstream projects
@@ -297,6 +305,24 @@ type MetadataUpdater interface {
 	UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error)
 }
 
+// CreatedTimestampAppender provides an interface for appending CT to storage.
+type CreatedTimestampAppender interface {
+	// AppendCTZeroSample adds a synthetic zero sample for the given ct timestamp,
+	// which will be associated with the given series, labels and the incoming
+	// sample's t (timestamp). AppendCTZeroSample returns an error if the zero
+	// sample can't be appended, for example when ct is too old, or when it would
+	// collide with the incoming sample (the sample has priority).
+	//
+	// AppendCTZeroSample has to be called before the corresponding sample Append.
+	// A series reference number is returned which can be used to modify the
+	// CT for the given series in the same or later transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to AppendCTZeroSample() at any point.
+	//
+	// If the reference is 0 it must not be used for caching.
+	AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error)
+}
+
 // SeriesSet contains a set of series.
 type SeriesSet interface {
 	Next() bool
diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go
index 4791113800..d1cd565170 100644
--- a/storage/memoized_iterator_test.go
+++ b/storage/memoized_iterator_test.go
@@ -68,26 +68,26 @@ func TestMemoizedSeriesIterator(t *testing.T) {
 		fSample{t: 400, f: 12},
 	}), 2)
 
-	require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed")
+	require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed")
 	sampleEq(1, 2, nil)
 	prevSampleEq(0, 0, nil, false)
 
-	require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed")
+	require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed")
 	sampleEq(5, 6, nil)
 	prevSampleEq(4, 5, nil, true)
 
 	// Seek to a histogram sample with a previous float sample.
-	require.Equal(t, it.Seek(102), chunkenc.ValFloatHistogram, "seek failed")
+	require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(102), "seek failed")
 	sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0))
 	prevSampleEq(101, 10, nil, true)
 
 	// Attempt to seek backwards (no-op).
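 	// Seeking to t=50 is before the current sample at t=102, so the iterator
 	// must stay in place and the assertions below still see the t=102 sample.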
- require.Equal(t, it.Seek(50), chunkenc.ValFloatHistogram, "seek failed") + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(50), "seek failed") sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) prevSampleEq(101, 10, nil, true) // Seek to a float histogram sample with a previous histogram sample. - require.Equal(t, it.Seek(104), chunkenc.ValFloatHistogram, "seek failed") + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(104), "seek failed") sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2)) prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true) @@ -101,7 +101,7 @@ func TestMemoizedSeriesIterator(t *testing.T) { sampleEq(400, 12, nil) prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true) - require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly") + require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly") } func BenchmarkMemoizedSeriesIterator(b *testing.B) { diff --git a/storage/merge_test.go b/storage/merge_test.go index a12753955f..fe156a57bf 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1562,7 +1562,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { } require.Subset(t, tcase.expectedWarnings, res.Warnings()) require.Equal(t, tcase.expectedErrs[0], res.Err()) - require.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match") + require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match") require.Equal(t, tcase.expectedSelectsSeries, lbls) for _, qr := range q.queriers { @@ -1578,7 +1578,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { t.Run("LabelNames", func(t *testing.T) { res, w, err := q.LabelNames(ctx) require.Subset(t, tcase.expectedWarnings, w) - require.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match") + require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) if err != nil { @@ -1593,7 +1593,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { t.Run("LabelValues", func(t *testing.T) { res, w, err := q.LabelValues(ctx, "test") require.Subset(t, tcase.expectedWarnings, w) - require.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match") + require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) if err != nil { @@ -1609,7 +1609,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") res, w, err := q.LabelValues(ctx, "test2", matcher) require.Subset(t, tcase.expectedWarnings, w) - require.True(t, errors.Is(err, tcase.expectedErrs[3]), "expected error doesn't match") + require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) if err != nil { diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index 3938862869..20d48d0087 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -62,7 +62,7 @@ type OAuthConfig struct { } // AzureADConfig is used to store the config values. -type AzureADConfig struct { +type AzureADConfig struct { //nolint:revive // exported. // ManagedIdentity is the managed identity that is being used to authenticate. 
ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"` diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go index 1143b71da7..5eed2c0b19 100644 --- a/storage/remote/azuread/azuread_test.go +++ b/storage/remote/azuread/azuread_test.go @@ -100,7 +100,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil) tokenProvider, err := newTokenProvider(c.cfg, ad.mockCredential) - ad.Assert().NoError(err) + ad.Require().NoError(err) rt := &azureADRoundTripper{ next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { @@ -113,15 +113,15 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { cli := &http.Client{Transport: rt} req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!")) - ad.Assert().NoError(err) + ad.Require().NoError(err) _, err = cli.Do(req) - ad.Assert().NoError(err) - ad.Assert().NotNil(gotReq) + ad.Require().NoError(err) + ad.NotNil(gotReq) origReq := gotReq - ad.Assert().NotEmpty(origReq.Header.Get("Authorization")) - ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) + ad.NotEmpty(origReq.Header.Get("Authorization")) + ad.Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) } } @@ -258,9 +258,9 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() { if c.err != "" { actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) - s.Assert().Nil(actualTokenProvider) - s.Assert().NotNil(actualErr) - s.Assert().ErrorContains(actualErr, c.err) + s.Nil(actualTokenProvider) + s.Require().Error(actualErr) + s.Require().ErrorContains(actualErr, c.err) } else { testToken := &azcore.AccessToken{ Token: testTokenString, @@ -272,21 +272,21 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() { actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) - s.Assert().NotNil(actualTokenProvider) - s.Assert().Nil(actualErr) - s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + s.NotNil(actualTokenProvider) + s.Require().NoError(actualErr) + s.NotNil(actualTokenProvider.getAccessToken(context.Background())) // Token set to refresh at half of the expiry time. The test tokens are set to expiry in 5s. // Hence, the 4 seconds wait to check if the token is refreshed. 
time.Sleep(4 * time.Second) - s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + s.NotNil(actualTokenProvider.getAccessToken(context.Background())) s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2*mockGetTokenCallCounter) - mockGetTokenCallCounter += 1 + mockGetTokenCallCounter++ accessToken, err := actualTokenProvider.getAccessToken(context.Background()) - s.Assert().Nil(err) - s.Assert().NotEqual(accessToken, testTokenString) + s.Require().NoError(err) + s.NotEqual(testTokenString, accessToken) } } } diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go index 49a55779fa..7c3993ca62 100644 --- a/storage/remote/chunked_test.go +++ b/storage/remote/chunked_test.go @@ -46,7 +46,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) { for _, msg := range msgs { n, err := w.Write(msg) require.NoError(t, err) - require.Equal(t, len(msg), n) + require.Len(t, msg, n) } i := 0 diff --git a/storage/remote/codec.go b/storage/remote/codec.go index 67035cd8ec..ffab821a5f 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -33,7 +33,6 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -784,7 +783,7 @@ func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label } // metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. -func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType { +func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType { mt := strings.ToUpper(string(t)) v, ok := prompb.MetricMetadata_MetricType_value[mt] if !ok { diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 585bdfd88f..ac8b0f0b54 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -20,11 +20,11 @@ import ( "testing" "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -57,7 +57,7 @@ var writeRequestFixture = &prompb.WriteRequest{ }, Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}}, - Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())}, + Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))}, }, { Labels: []prompb.Label{ @@ -69,7 +69,7 @@ var writeRequestFixture = &prompb.WriteRequest{ }, Samples: []prompb.Sample{{Value: 2, Timestamp: 1}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}}, - Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat())}, + Histograms: 
[]prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat(nil))}, }, }, } @@ -488,17 +488,17 @@ func TestMergeLabels(t *testing.T) { func TestMetricTypeToMetricTypeProto(t *testing.T) { tc := []struct { desc string - input textparse.MetricType + input model.MetricType expected prompb.MetricMetadata_MetricType }{ { desc: "with a single-word metric", - input: textparse.MetricTypeCounter, + input: model.MetricTypeCounter, expected: prompb.MetricMetadata_COUNTER, }, { desc: "with a two-word metric", - input: textparse.MetricTypeStateset, + input: model.MetricTypeStateset, expected: prompb.MetricMetadata_STATESET, }, { @@ -755,7 +755,7 @@ func TestStreamResponse(t *testing.T) { maxBytesInFrame, &sync.Pool{}) require.Nil(t, warning) - require.Nil(t, err) + require.NoError(t, err) expectData := []*prompb.ChunkedSeries{{ Labels: lbs1, Chunks: []prompb.Chunk{chunk, chunk}, diff --git a/storage/remote/intern_test.go b/storage/remote/intern_test.go index 84a74f32cc..917c42e912 100644 --- a/storage/remote/intern_test.go +++ b/storage/remote/intern_test.go @@ -32,7 +32,7 @@ func TestIntern(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) } @@ -43,13 +43,13 @@ func TestIntern_MultiRef(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) interner.intern(testString) interned, ok = interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load())) } @@ -60,12 +60,12 @@ func TestIntern_DeleteRef(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) interner.release(testString) _, ok = interner.pool[testString] - require.Equal(t, false, ok) + require.False(t, ok) } func TestIntern_MultiRef_Concurrent(t *testing.T) { @@ -74,7 +74,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) go interner.release(testString) @@ -86,6 +86,6 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { interner.mtx.RLock() interned, ok = interner.pool[testString] interner.mtx.RUnlock() - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) } diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go index 21de565ed9..abfea3c7b0 100644 --- a/storage/remote/metadata_watcher.go +++ b/storage/remote/metadata_watcher.go @@ -136,7 +136,7 @@ func (mw *MetadataWatcher) collect() { metadata := []scrape.MetricMetadata{} for _, tset := range mw.manager.TargetsActive() { for _, target := range tset { - for _, entry := range target.MetadataList() { + 
for _, entry := range target.ListMetadata() { if _, ok := metadataSet[entry]; !ok { metadata = append(metadata, entry) metadataSet[entry] = struct{}{} diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go index bc841a0147..0cd6027a83 100644 --- a/storage/remote/metadata_watcher_test.go +++ b/storage/remote/metadata_watcher_test.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/scrape" ) @@ -94,7 +93,7 @@ func TestWatchScrapeManager_NotReady(t *testing.T) { } mw := NewMetadataWatcher(nil, smm, "", wt, interval, deadline) - require.Equal(t, false, mw.ready()) + require.False(t, mw.ready()) mw.collect() @@ -108,13 +107,13 @@ func TestWatchScrapeManager_ReadyForCollection(t *testing.T) { Metadata: []scrape.MetricMetadata{ { Metric: "prometheus_tsdb_head_chunks_created_total", - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "Total number", Unit: "", }, { Metric: "prometheus_remote_storage_retried_samples_total", - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "Total number", Unit: "", }, @@ -124,7 +123,7 @@ func TestWatchScrapeManager_ReadyForCollection(t *testing.T) { Metadata: []scrape.MetricMetadata{ { Metric: "prometheus_tsdb_head_chunks_created_total", - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "Total number", Unit: "", }, diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index af0960e862..b44ba869fe 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -25,7 +25,6 @@ var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister( // // Exception is made for double-underscores which are allowed func NormalizeLabel(label string) string { - // Trivial case if len(label) == 0 { return label diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index a141df3480..17a904fcdf 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -38,7 +38,6 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/scrape" @@ -180,7 +179,7 @@ func TestMetadataDelivery(t *testing.T) { for i := 0; i < numMetadata; i++ { metadata = append(metadata, scrape.MetricMetadata{ Metric: "prometheus_remote_storage_sent_metadata_bytes_total_" + strconv.Itoa(i), - Type: textparse.MetricTypeCounter, + Type: model.MetricTypeCounter, Help: "a nice help text", Unit: "", }) @@ -188,7 +187,7 @@ func TestMetadataDelivery(t *testing.T) { m.AppendMetadata(context.Background(), metadata) - require.Equal(t, numMetadata, len(c.receivedMetadata)) + require.Len(t, c.receivedMetadata, numMetadata) // One more write than the rounded qoutient should be performed in order to get samples that didn't // fit into MaxSamplesPerSend. 
require.Equal(t, numMetadata/mcfg.MaxSamplesPerSend+1, c.writesReceived) @@ -318,9 +317,9 @@ func TestSeriesReset(t *testing.T) { } m.StoreSeries(series, i) } - require.Equal(t, numSegments*numSeries, len(m.seriesLabels)) + require.Len(t, m.seriesLabels, numSegments*numSeries) m.SeriesReset(2) - require.Equal(t, numSegments*numSeries/2, len(m.seriesLabels)) + require.Len(t, m.seriesLabels, numSegments*numSeries/2) } func TestReshard(t *testing.T) { @@ -619,7 +618,7 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record. fh := record.RefFloatHistogramSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), - FH: hist.ToFloat(), + FH: hist.ToFloat(nil), } floatHistograms = append(floatHistograms, fh) } else { @@ -1288,7 +1287,7 @@ func TestQueueManagerMetrics(t *testing.T) { // Make sure metrics pass linting. problems, err := client_testutil.GatherAndLint(reg) require.NoError(t, err) - require.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems) + require.Empty(t, problems, "Metric linting problems detected: %v", problems) // Make sure all metrics were unregistered. A failure here means you need // unregister a metric in `queueManagerMetrics.unregister()`. diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index fdb9f04dd2..e83a0cb21a 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -97,7 +97,7 @@ func TestSampledReadEndpoint(t *testing.T) { err = proto.Unmarshal(uncompressed, &resp) require.NoError(t, err) - require.Equal(t, 2, len(resp.Results), "Expected 2 results.") + require.Len(t, resp.Results, 2, "Expected 2 results.") require.Equal(t, &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ @@ -191,7 +191,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) { results = append(results, res) } - require.Equal(b, 6, len(results), "Expected 6 results.") + require.Len(b, results, 6, "Expected 6 results.") } } @@ -291,7 +291,7 @@ func TestStreamReadEndpoint(t *testing.T) { results = append(results, res) } - require.Equal(t, 6, len(results), "Expected 6 results.") + require.Len(t, results, 6, "Expected 6 results.") require.Equal(t, []*prompb.ChunkedReadResponse{ { diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index 54d4825f6a..931bacf050 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -186,7 +186,7 @@ func TestSeriesSetFilter(t *testing.T) { filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove) act, ws, err := ToQueryResult(filtered, 1e6) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, tc.expected, act) } } diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go index 040a23a5a6..a62cd2da39 100644 --- a/storage/remote/storage_test.go +++ b/storage/remote/storage_test.go @@ -44,10 +44,10 @@ func TestStorageLifecycle(t *testing.T) { require.NoError(t, s.ApplyConfig(conf)) // make sure remote write has a queue. - require.Equal(t, 1, len(s.rws.queues)) + require.Len(t, s.rws.queues, 1) // make sure remote write has a queue. 
- require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) err := s.Close() require.NoError(t, err) @@ -62,13 +62,13 @@ func TestUpdateRemoteReadConfigs(t *testing.T) { GlobalConfig: config.GlobalConfig{}, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 0, len(s.queryables)) + require.Empty(t, s.queryables) conf.RemoteReadConfigs = []*config.RemoteReadConfig{ baseRemoteReadConfig("http://test-storage.com"), } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) err := s.Close() require.NoError(t, err) @@ -85,14 +85,14 @@ func TestFilterExternalLabels(t *testing.T) { }, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 0, len(s.queryables)) + require.Empty(t, s.queryables) conf.RemoteReadConfigs = []*config.RemoteReadConfig{ baseRemoteReadConfig("http://test-storage.com"), } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) require.Equal(t, 1, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len()) err := s.Close() @@ -110,7 +110,7 @@ func TestIgnoreExternalLabels(t *testing.T) { }, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 0, len(s.queryables)) + require.Empty(t, s.queryables) conf.RemoteReadConfigs = []*config.RemoteReadConfig{ baseRemoteReadConfig("http://test-storage.com"), @@ -119,7 +119,7 @@ func TestIgnoreExternalLabels(t *testing.T) { conf.RemoteReadConfigs[0].FilterExternalLabels = false require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) require.Equal(t, 0, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len()) err := s.Close() diff --git a/storage/remote/write.go b/storage/remote/write.go index 237f8caa91..66455cb4dd 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -303,6 +303,11 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { + // AppendCTZeroSample is no-op for remote-write for now. + return 0, nil +} + // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 9891c6aae7..971f7b457e 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -68,7 +68,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { err = h.write(r.Context(), req) switch { case err == nil: - case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): // Indicated an out of order sample is a bad request to prevent retries. 
http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 27d0e9fabd..fd5b34ecd2 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -134,7 +134,7 @@ func TestOutOfOrderExemplar(t *testing.T) { func TestOutOfOrderHistogram(t *testing.T) { buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, - Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())}, + Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))}, }}, nil, nil, nil) require.NoError(t, err) @@ -228,7 +228,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) require.Equal(b, http.StatusNoContent, recorder.Code) - require.Equal(b, db.Head().NumSeries(), uint64(1000)) + require.Equal(b, uint64(1000), db.Head().NumSeries()) var bufRequests [][]byte for i := 0; i < 100; i++ { @@ -245,7 +245,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { recorder = httptest.NewRecorder() handler.ServeHTTP(recorder, req) require.Equal(b, http.StatusNoContent, recorder.Code) - require.Equal(b, db.Head().NumSeries(), uint64(1000)) + require.Equal(b, uint64(1000), db.Head().NumSeries()) } } @@ -339,3 +339,8 @@ func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ // UpdateMetadata is no-op for remote write (where mockAppendable is being used to test) for now. return 0, nil } + +func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { + // AppendCTZeroSample is no-op for remote-write for now. 
+ return 0, nil +} diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 824e319c2f..4a9a3bafc4 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -212,7 +212,7 @@ func TestWriteStorageLifecycle(t *testing.T) { }, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) err := s.Close() require.NoError(t, err) @@ -233,14 +233,14 @@ func TestUpdateExternalLabels(t *testing.T) { hash, err := toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) - require.Equal(t, 0, len(s.queues[hash].externalLabels)) + require.Len(t, s.queues, 1) + require.Empty(t, s.queues[hash].externalLabels) conf.GlobalConfig.ExternalLabels = externalLabels hash, err = toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels) err = s.Close() @@ -262,10 +262,10 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) { require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) _, hashExists := s.queues[hash] require.True(t, hashExists, "Queue pointer should have remained the same") @@ -312,7 +312,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { } } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 3, len(s.queues)) + require.Len(t, s.queues, 3) hashes := make([]string, len(conf.RemoteWriteConfigs)) queues := make([]*QueueManager, len(conf.RemoteWriteConfigs)) @@ -334,7 +334,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { RemoteWriteConfigs: []*config.RemoteWriteConfig{c0, c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 3, len(s.queues)) + require.Len(t, s.queues, 3) _, hashExists := s.queues[hashes[0]] require.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.") @@ -350,7 +350,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { c1.HTTPClientConfig.BearerToken = "bar" err := s.ApplyConfig(conf) require.NoError(t, err) - require.Equal(t, 3, len(s.queues)) + require.Len(t, s.queues, 3) _, hashExists = s.queues[hashes[0]] require.True(t, hashExists, "Pointer of unchanged queue should have remained the same") @@ -367,7 +367,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 2, len(s.queues)) + require.Len(t, s.queues, 2) _, hashExists = s.queues[hashes[0]] require.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.") @@ -399,9 +399,9 @@ func TestOTLPWriteHandler(t *testing.T) { resp := recorder.Result() require.Equal(t, http.StatusOK, resp.StatusCode) - require.Equal(t, 12, len(appendable.samples)) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count) - require.Equal(t, 1, len(appendable.histograms)) // 1 (exponential histogram) - require.Equal(t, 1, len(appendable.exemplars)) // 1 (exemplar) + require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) 
+ 7 (hist_bucket) + 2 (hist_sum, hist_count) + require.Len(t, appendable.histograms, 1) // 1 (exponential histogram) + require.Len(t, appendable.exemplars, 1) // 1 (exemplar) } func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest { diff --git a/storage/series_test.go b/storage/series_test.go index 163a47ec9c..130e19484c 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -151,7 +151,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { j := 0 for iter.Next() == chunkenc.ValFloat { ts, v := iter.At() - require.EqualValues(t, series[i].samples[j], fSample{t: ts, f: v}) + require.EqualValues(t, fSample{t: ts, f: v}, series[i].samples[j]) j++ } } diff --git a/tracing/tracing.go b/tracing/tracing.go index 6ff7dd4c0e..d17540a919 100644 --- a/tracing/tracing.go +++ b/tracing/tracing.go @@ -32,6 +32,7 @@ import ( tracesdk "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "google.golang.org/grpc/credentials" "github.com/prometheus/prometheus/config" @@ -87,7 +88,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if cfg.TracingConfig.Endpoint == "" { m.config = cfg.TracingConfig m.shutdownFunc = nil - otel.SetTracerProvider(trace.NewNoopTracerProvider()) + otel.SetTracerProvider(noop.NewTracerProvider()) level.Info(m.logger).Log("msg", "Tracing provider uninstalled.") return nil } diff --git a/tracing/tracing_test.go b/tracing/tracing_test.go index 7f0b5d62e6..b7996c6104 100644 --- a/tracing/tracing_test.go +++ b/tracing/tracing_test.go @@ -20,7 +20,7 @@ import ( config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "github.com/prometheus/prometheus/config" ) @@ -105,7 +105,7 @@ func TestUninstallingTracerProvider(t *testing.T) { } require.NoError(t, m.ApplyConfig(&cfg)) - require.NotEqual(t, trace.NewNoopTracerProvider(), otel.GetTracerProvider()) + require.NotEqual(t, noop.NewTracerProvider(), otel.GetTracerProvider()) // Uninstall by passing empty config. cfg2 := config.Config{ @@ -114,7 +114,7 @@ func TestUninstallingTracerProvider(t *testing.T) { require.NoError(t, m.ApplyConfig(&cfg2)) // Make sure we get a no-op tracer provider after uninstallation. - require.Equal(t, trace.NewNoopTracerProvider(), otel.GetTracerProvider()) + require.Equal(t, noop.NewTracerProvider(), otel.GetTracerProvider()) } func TestTracerProviderShutdown(t *testing.T) { @@ -131,5 +131,5 @@ func TestTracerProviderShutdown(t *testing.T) { // Check if we closed the done channel. _, ok := <-m.done - require.Equal(t, ok, false) + require.False(t, ok) } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 66861a487c..d399897133 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -241,6 +241,8 @@ type DB struct { donec chan struct{} stopc chan struct{} + writeNotified wlog.WriteNotified + metrics *dbMetrics } @@ -311,6 +313,12 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin return db, nil } +// SetWriteNotified allows to set an instance to notify when a write happens. +// It must be used during initialization. It is not safe to use it during execution. 
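SetWriteNotified, defined just below, is the agent's half of the WAL write-notification mechanism: Commit (further down in this file) calls Notify after a successful write so a WAL watcher can wake up immediately instead of waiting for its next poll. A minimal sketch of a conforming receiver, assuming only the one-method wlog.WriteNotified interface (chanNotifier is hypothetical):

package example

import "github.com/prometheus/prometheus/tsdb/wlog"

// chanNotifier forwards write notifications onto a channel; a reader
// blocked on the channel wakes as soon as a commit lands.
type chanNotifier struct{ c chan struct{} }

func (n *chanNotifier) Notify() {
	select {
	case n.c <- struct{}{}:
	default: // A wake-up is already pending; dropping this one is fine.
	}
}

var _ wlog.WriteNotified = (*chanNotifier)(nil)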
+func (db *DB) SetWriteNotified(wn wlog.WriteNotified) { + db.writeNotified = wn +} + func validateOptions(opts *Options) *Options { if opts == nil { opts = DefaultOptions() } @@ -351,7 +359,7 @@ func (db *DB) replayWAL() error { start := time.Now() dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir()) - if err != nil && err != record.ErrNotFound { + if err != nil && !errors.Is(err, record.ErrNotFound) { return fmt.Errorf("find last checkpoint: %w", err) } @@ -954,6 +962,11 @@ func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Met return 0, nil } +func (a *appender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { + // TODO(bwplotka): Wire CT zero samples in the Agent's appender. + return 0, nil +} + // Commit submits the collected samples and purges the batch. func (a *appender) Commit() error { if err := a.log(); err != nil { @@ -962,6 +975,10 @@ func (a *appender) Commit() error { a.clearData() a.appenderPool.Put(a) + + if a.writeNotified != nil { + a.writeNotified.Notify() + } return nil } diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 1e0976c3f1..32b7e4394e 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -84,7 +84,7 @@ func TestDB_InvalidSeries(t *testing.T) { }) } -func createTestAgentDB(t *testing.T, reg prometheus.Registerer, opts *Options) *DB { +func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) *DB { t.Helper() dbDir := t.TempDir() @@ -878,3 +878,21 @@ func TestDBAllowOOOSamples(t *testing.T) { require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms") require.NoError(t, db.Close()) } + +func BenchmarkCreateSeries(b *testing.B) { + s := createTestAgentDB(b, nil, DefaultOptions()) + defer s.Close() + + app := s.Appender(context.Background()).(*appender) + lbls := make([]labels.Labels, b.N) + + for i, l := range labelsForTest("benchmark", b.N) { + lbls[i] = labels.New(l...) + } + + b.ResetTimer() + + for _, l := range lbls { + app.getOrCreate(l) + } +} diff --git a/tsdb/agent/series.go b/tsdb/agent/series.go index d38518f71e..76e7342171 100644 --- a/tsdb/agent/series.go +++ b/tsdb/agent/series.go @@ -45,14 +45,23 @@ func (m *memSeries) updateTimestamp(newTs int64) bool { return false } -// seriesHashmap is a simple hashmap for memSeries by their label set. -// It is built on top of a regular hashmap and holds a slice of series to -// resolve hash collisions. Its methods require the hash to be submitted +// seriesHashmap lets the agent find a memSeries by its label set, via a 64-bit hash. +// There is one map for the common case where the hash value is unique, and a +// second map for the case that two series have the same hash value. +// Each series is in only one of the maps. Its methods require the hash to be submitted // with the label set to avoid re-computing hash throughout the code.
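The rewrite that follows replaces the single map[uint64][]*memSeries with that two-map layout: genuine hash collisions are rare, so the common case stores one pointer per series instead of a slice header plus a one-element backing array. A sketch of the resulting lookup path (get is a hypothetical free function; the real methods hang off seriesHashmap):

package example

import "github.com/prometheus/prometheus/model/labels"

type memSeries struct{ lset labels.Labels }

// get probes the cheap unique map first and only falls back to the
// conflicts map when two label sets share the same 64-bit hash.
func get(unique map[uint64]*memSeries, conflicts map[uint64][]*memSeries, hash uint64, lset labels.Labels) *memSeries {
	if s, ok := unique[hash]; ok && labels.Equal(s.lset, lset) {
		return s
	}
	for _, s := range conflicts[hash] {
		if labels.Equal(s.lset, lset) {
			return s
		}
	}
	return nil
}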
-type seriesHashmap map[uint64][]*memSeries +type seriesHashmap struct { + unique map[uint64]*memSeries + conflicts map[uint64][]*memSeries +} -func (m seriesHashmap) Get(hash uint64, lset labels.Labels) *memSeries { - for _, s := range m[hash] { +func (m *seriesHashmap) Get(hash uint64, lset labels.Labels) *memSeries { + if s, found := m.unique[hash]; found { + if labels.Equal(s.lset, lset) { + return s + } + } + for _, s := range m.conflicts[hash] { if labels.Equal(s.lset, lset) { return s } @@ -60,28 +69,49 @@ func (m seriesHashmap) Get(hash uint64, lset labels.Labels) *memSeries { return nil } -func (m seriesHashmap) Set(hash uint64, s *memSeries) { - seriesSet := m[hash] +func (m *seriesHashmap) Set(hash uint64, s *memSeries) { + if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) { + m.unique[hash] = s + return + } + if m.conflicts == nil { + m.conflicts = make(map[uint64][]*memSeries) + } + seriesSet := m.conflicts[hash] for i, prev := range seriesSet { if labels.Equal(prev.lset, s.lset) { seriesSet[i] = s return } } - m[hash] = append(seriesSet, s) + m.conflicts[hash] = append(seriesSet, s) } -func (m seriesHashmap) Delete(hash uint64, ref chunks.HeadSeriesRef) { +func (m *seriesHashmap) Delete(hash uint64, ref chunks.HeadSeriesRef) { var rem []*memSeries - for _, s := range m[hash] { - if s.ref != ref { - rem = append(rem, s) + unique, found := m.unique[hash] + switch { + case !found: // Supplied hash is not stored. + return + case unique.ref == ref: + conflicts := m.conflicts[hash] + if len(conflicts) == 0 { // Exactly one series with this hash was stored + delete(m.unique, hash) + return + } + m.unique[hash] = conflicts[0] // First remaining series goes in 'unique'. + rem = conflicts[1:] // Keep the rest. + default: // The series to delete is somewhere in 'conflicts'. Keep all the ones that don't match. + for _, s := range m.conflicts[hash] { + if s.ref != ref { + rem = append(rem, s) + } } } if len(rem) == 0 { - delete(m, hash) + delete(m.conflicts, hash) } else { - m[hash] = rem + m.conflicts[hash] = rem } } @@ -117,7 +147,10 @@ func newStripeSeries(stripeSize int) *stripeSeries { s.series[i] = map[chunks.HeadSeriesRef]*memSeries{} } for i := range s.hashes { - s.hashes[i] = seriesHashmap{} + s.hashes[i] = seriesHashmap{ + unique: map[uint64]*memSeries{}, + conflicts: nil, // Initialized on demand in set(). + } } for i := range s.exemplars { s.exemplars[i] = map[chunks.HeadSeriesRef]*exemplar.Exemplar{} @@ -136,40 +169,49 @@ func (s *stripeSeries) GC(mint int64) map[chunks.HeadSeriesRef]struct{} { defer s.gcMut.Unlock() deleted := map[chunks.HeadSeriesRef]struct{}{} + + // For one series, truncate old chunks and check if any chunks left. If not, mark as deleted and collect the ID. + check := func(hashLock int, hash uint64, series *memSeries) { + series.Lock() + + // Any series that has received a write since mint is still alive. + if series.lastTs >= mint { + series.Unlock() + return + } + + // The series is stale. We need to obtain a second lock for the + // ref if it's different than the hash lock. + refLock := int(series.ref) & (s.size - 1) + if hashLock != refLock { + s.locks[refLock].Lock() + } + + deleted[series.ref] = struct{}{} + delete(s.series[refLock], series.ref) + s.hashes[hashLock].Delete(hash, series.ref) + + // Since the series is gone, we'll also delete + // the latest stored exemplar. 
+ delete(s.exemplars[refLock], series.ref) + + if hashLock != refLock { + s.locks[refLock].Unlock() + } + series.Unlock() + } + for hashLock := 0; hashLock < s.size; hashLock++ { s.locks[hashLock].Lock() - for hash, all := range s.hashes[hashLock] { + for hash, all := range s.hashes[hashLock].conflicts { for _, series := range all { - series.Lock() - - // Any series that has received a write since mint is still alive. - if series.lastTs >= mint { - series.Unlock() - continue - } - - // The series is stale. We need to obtain a second lock for the - // ref if it's different than the hash lock. - refLock := int(series.ref) & (s.size - 1) - if hashLock != refLock { - s.locks[refLock].Lock() - } - - deleted[series.ref] = struct{}{} - delete(s.series[refLock], series.ref) - s.hashes[hashLock].Delete(hash, series.ref) - - // Since the series is gone, we'll also delete - // the latest stored exemplar. - delete(s.exemplars[refLock], series.ref) - - if hashLock != refLock { - s.locks[refLock].Unlock() - } - series.Unlock() + check(hashLock, hash, series) } } + for hash, series := range s.hashes[hashLock].unique { + check(hashLock, hash, series) + } s.locks[hashLock].Unlock() } diff --git a/tsdb/agent/series_test.go b/tsdb/agent/series_test.go index 480f6c0263..ae327d8582 100644 --- a/tsdb/agent/series_test.go +++ b/tsdb/agent/series_test.go @@ -74,3 +74,63 @@ func TestNoDeadlock(t *testing.T) { require.FailNow(t, "deadlock detected") } } + +func labelsWithHashCollision() (labels.Labels, labels.Labels) { + // These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions + ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y") + ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF") + + if ls1.Hash() != ls2.Hash() { + // These ones are the same when using -tags stringlabels + ls1 = labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl") + ls2 = labels.FromStrings("__name__", "metric", "lbl", "RqcXatm") + } + + if ls1.Hash() != ls2.Hash() { + panic("This code needs to be updated: find new labels with colliding hash values.") + } + + return ls1, ls2 +} + +// stripeSeriesWithCollidingSeries returns a stripeSeries with two memSeries having the same, colliding, hash. 
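The fixture built by the function defined next depends on the colliding pairs above; which pair actually collides is a property of the concrete labels implementation, hence the two candidates. A standalone check of the premise (per the comments above, the first pair collides under the default build and the second under -tags stringlabels):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y")
	ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF")
	// Same hash, different label sets: expected output is "true false".
	fmt.Println(ls1.Hash() == ls2.Hash(), labels.Equal(ls1, ls2))
}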
+func stripeSeriesWithCollidingSeries(_ *testing.T) (*stripeSeries, *memSeries, *memSeries) { + lbls1, lbls2 := labelsWithHashCollision() + ms1 := memSeries{ + lset: lbls1, + } + ms2 := memSeries{ + lset: lbls2, + } + hash := lbls1.Hash() + s := newStripeSeries(1) + + s.Set(hash, &ms1) + s.Set(hash, &ms2) + + return s, &ms1, &ms2 +} + +func TestStripeSeries_Get(t *testing.T) { + s, ms1, ms2 := stripeSeriesWithCollidingSeries(t) + hash := ms1.lset.Hash() + + // Verify that we can get both of the series despite the hash collision + got := s.GetByHash(hash, ms1.lset) + require.Same(t, ms1, got) + got = s.GetByHash(hash, ms2.lset) + require.Same(t, ms2, got) +} + +func TestStripeSeries_gc(t *testing.T) { + s, ms1, ms2 := stripeSeriesWithCollidingSeries(t) + hash := ms1.lset.Hash() + + s.GC(1) + + // Verify that we can get neither ms1 nor ms2 after gc-ing corresponding series + got := s.GetByHash(hash, ms1.lset) + require.Nil(t, got) + got = s.GetByHash(hash, ms2.lset) + require.Nil(t, got) +} diff --git a/tsdb/block.go b/tsdb/block.go index 9b0bf0bf1e..9ccef35a45 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -17,6 +17,7 @@ package tsdb import ( "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -27,7 +28,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -501,14 +501,19 @@ func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, ma slices.Sort(st) } } - - return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) + } + return st, nil } func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) == 0 { st, err := r.ir.LabelValues(ctx, name) - return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) + } + return st, nil } return labelValuesWithMatchers(ctx, r.ir, name, matchers...) @@ -525,7 +530,7 @@ func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma func (r blockIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { p, err := r.ir.Postings(ctx, name, values...) if err != nil { - return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + return p, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return p, nil } @@ -544,7 +549,7 @@ func (r blockIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { if err := r.ir.Series(ref, builder, chks); err != nil { - return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + return fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return nil } @@ -596,7 +601,7 @@ func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Mat p, err := pb.indexr.PostingsForMatchers(ctx, false, ms...) 
if err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } ir := pb.indexr @@ -684,12 +689,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er func (pb *Block) Snapshot(dir string) error { blockDir := filepath.Join(dir, pb.meta.ULID.String()) if err := os.MkdirAll(blockDir, 0o777); err != nil { - return errors.Wrap(err, "create snapshot block dir") + return fmt.Errorf("create snapshot block dir: %w", err) } chunksDir := chunkDir(blockDir) if err := os.MkdirAll(chunksDir, 0o777); err != nil { - return errors.Wrap(err, "create snapshot chunk dir") + return fmt.Errorf("create snapshot chunk dir: %w", err) } // Hardlink meta, index and tombstones @@ -699,7 +704,7 @@ func (pb *Block) Snapshot(dir string) error { tombstones.TombstonesFilename, } { if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil { - return errors.Wrapf(err, "create snapshot %s", fname) + return fmt.Errorf("create snapshot %s: %w", fname, err) } } @@ -707,13 +712,13 @@ func (pb *Block) Snapshot(dir string) error { curChunkDir := chunkDir(pb.dir) files, err := os.ReadDir(curChunkDir) if err != nil { - return errors.Wrap(err, "ReadDir the current chunk dir") + return fmt.Errorf("ReadDir the current chunk dir: %w", err) } for _, f := range files { err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name())) if err != nil { - return errors.Wrap(err, "hardlink a chunk") + return fmt.Errorf("hardlink a chunk: %w", err) } } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 26123f0a9c..7c23f76d22 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -59,14 +59,14 @@ func TestSetCompactionFailed(t *testing.T) { blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1)) b, err := OpenBlock(nil, blockDir, nil) require.NoError(t, err) - require.Equal(t, false, b.meta.Compaction.Failed) + require.False(t, b.meta.Compaction.Failed) require.NoError(t, b.setCompactionFailed()) - require.Equal(t, true, b.meta.Compaction.Failed) + require.True(t, b.meta.Compaction.Failed) require.NoError(t, b.Close()) b, err = OpenBlock(nil, blockDir, nil) require.NoError(t, err) - require.Equal(t, true, b.meta.Compaction.Failed) + require.True(t, b.meta.Compaction.Failed) require.NoError(t, b.Close()) } @@ -166,7 +166,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) n, err := f.Write([]byte("x")) require.NoError(t, err) - require.Equal(t, n, 1) + require.Equal(t, 1, n) }, iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:cfc0526c, actual:34815eae"), }, @@ -178,7 +178,7 @@ func TestCorruptedChunk(t *testing.T) { blockDir := createBlock(t, tmpdir, []storage.Series{series}) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) - require.Greater(t, len(files), 0, "No chunk created.") + require.NotEmpty(t, files, "No chunk created.") f, err := os.OpenFile(files[0], os.O_RDWR, 0o666) require.NoError(t, err) @@ -224,7 +224,7 @@ func TestLabelValuesWithMatchers(t *testing.T) { blockDir := createBlock(t, tmpdir, seriesEntries) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) - require.Greater(t, len(files), 0, "No chunk created.") + require.NotEmpty(t, files, "No chunk created.") // Check open err. 
block, err := OpenBlock(nil, blockDir, nil) @@ -355,16 +355,14 @@ func TestReadIndexFormatV1(t *testing.T) { q, err := NewBlockQuerier(block, 0, 1000) require.NoError(t, err) - require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")), - map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}) + require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))) q, err = NewBlockQuerier(block, 0, 1000) require.NoError(t, err) - require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")), - map[string][]chunks.Sample{ - `{foo="bar"}`: {sample{t: 1, f: 2}}, - `{foo="baz"}`: {sample{t: 3, f: 4}}, - }) + require.Equal(t, map[string][]chunks.Sample{ + `{foo="bar"}`: {sample{t: 1, f: 2}}, + `{foo="baz"}`: {sample{t: 3, f: 4}}, + }, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$"))) } func BenchmarkLabelValuesWithMatchers(b *testing.B) { @@ -386,7 +384,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { blockDir := createBlock(b, tmpdir, seriesEntries) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(b, err) - require.Greater(b, len(files), 0, "No chunk created.") + require.NotEmpty(b, files, "No chunk created.") // Check open err. block, err := OpenBlock(nil, blockDir, nil) @@ -405,7 +403,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { for benchIdx := 0; benchIdx < b.N; benchIdx++ { actualValues, err := indexReader.LabelValues(ctx, "b_tens", matchers...) require.NoError(b, err) - require.Equal(b, 9, len(actualValues)) + require.Len(b, actualValues, 9) } } @@ -439,7 +437,7 @@ func TestLabelNamesWithMatchers(t *testing.T) { blockDir := createBlock(t, tmpdir, seriesEntries) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) - require.Greater(t, len(files), 0, "No chunk created.") + require.NotEmpty(t, files, "No chunk created.") // Check open err. 
block, err := OpenBlock(nil, blockDir, nil) @@ -661,7 +659,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo h.CounterResetHint = histogram.NotCounterReset } if floatHistogram { - return sample{t: ts, fh: h.ToFloat()} + return sample{t: ts, fh: h.ToFloat(nil)} } return sample{t: ts, h: h} }) @@ -697,7 +695,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in h.CounterResetHint = histogram.NotCounterReset } if floatHistogram { - s = sample{t: ts, fh: h.ToFloat()} + s = sample{t: ts, fh: h.ToFloat(nil)} } else { s = sample{t: ts, h: h} } diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index adfb64fc11..872c765302 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -15,13 +15,14 @@ package tsdb import ( "context" + "errors" + "fmt" "math" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" @@ -65,7 +66,7 @@ func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWrite func (w *BlockWriter) initHead() error { chunkDir, err := os.MkdirTemp(os.TempDir(), "head") if err != nil { - return errors.Wrap(err, "create temp dir") + return fmt.Errorf("create temp dir: %w", err) } w.chunkDir = chunkDir opts := DefaultHeadOptions() @@ -74,7 +75,7 @@ func (w *BlockWriter) initHead() error { opts.EnableNativeHistograms.Store(true) h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats()) if err != nil { - return errors.Wrap(err, "tsdb.NewHead") + return fmt.Errorf("tsdb.NewHead: %w", err) } w.head = h @@ -102,11 +103,11 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { []int64{w.blockSize}, chunkenc.NewPool(), nil, true) if err != nil { - return ulid.ULID{}, errors.Wrap(err, "create leveled compactor") + return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err) } id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil) if err != nil { - return ulid.ULID{}, errors.Wrap(err, "compactor write") + return ulid.ULID{}, fmt.Errorf("compactor write: %w", err) } return id, nil diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index 3d22f74cca..9db1bf364f 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -14,6 +14,7 @@ package chunkenc import ( + "errors" "fmt" "io" "math/rand" @@ -153,8 +154,8 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) { res = v i++ } - if it.Err() != io.EOF { - require.NoError(b, it.Err()) + if err := it.Err(); err != nil && !errors.Is(err, io.EOF) { + require.NoError(b, err) } _ = res } diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index dd35b9cae4..3d76cdf65f 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -605,10 +605,10 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h) if !okToAppend || counterReset { if appendOnly { - if !okToAppend { - return nil, false, a, fmt.Errorf("float histogram schema change") + if counterReset { + return nil, false, a, fmt.Errorf("float histogram counter reset") } - return nil, false, a, fmt.Errorf("float histogram counter reset") + return nil, false, a, fmt.Errorf("float histogram schema change") } newChunk := NewFloatHistogramChunk() app, err := newChunk.Appender() diff --git 
a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index a54125fdcb..6f5a95fb1c 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) type floatResult struct { @@ -93,10 +94,10 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { }, NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8) } - chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(), false) + chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) - exp = append(exp, floatResult{t: ts, h: h.ToFloat()}) + exp = append(exp, floatResult{t: ts, h: h.ToFloat(nil)}) require.Equal(t, 1, c.NumSamples()) // Add an updated histogram. @@ -107,10 +108,10 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { h.Sum = 24.4 h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14) h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15) - chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(), false) + chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) - expH := h.ToFloat() + expH := h.ToFloat(nil) expH.CounterResetHint = histogram.NotCounterReset exp = append(exp, floatResult{t: ts, h: expH}) require.Equal(t, 2, c.NumSamples()) @@ -126,10 +127,10 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { h.Sum = 24.4 h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27) h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22) - chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(), false) + chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) - expH = h.ToFloat() + expH = h.ToFloat(nil) expH.CounterResetHint = histogram.NotCounterReset exp = append(exp, floatResult{t: ts, h: expH}) require.Equal(t, 3, c.NumSamples()) @@ -216,7 +217,7 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { NegativeBuckets: []int64{1}, } - chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(), false) + chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 1, c.NumSamples()) @@ -244,13 +245,13 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3) // This is how span changes will be handled. hApp, _ := app.(*FloatHistogramAppender) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat()) - require.Greater(t, len(posInterjections), 0) - require.Greater(t, len(negInterjections), 0) + posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat(nil)) + require.NotEmpty(t, posInterjections) + require.NotEmpty(t, negInterjections) require.True(t, ok) // Only new buckets came in. 
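The ToFloat call sites gain a nil argument throughout this file because Histogram.ToFloat now accepts an optional destination to reuse; passing nil keeps the old allocate-a-fresh-copy behaviour. A sketch of the two call patterns (convert is hypothetical):

package example

import "github.com/prometheus/prometheus/model/histogram"

// convert shows both forms of the new signature: nil allocates, while a
// non-nil scratch destination lets hot paths avoid the allocation.
func convert(h *histogram.Histogram, scratch *histogram.FloatHistogram) (fresh, reused *histogram.FloatHistogram) {
	fresh = h.ToFloat(nil)      // behaves like the old zero-argument ToFloat()
	reused = h.ToFloat(scratch) // reuses scratch's memory where possible
	return fresh, reused
}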
require.False(t, cr) c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans) - chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(), false) + chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 2, c.NumSamples()) @@ -262,10 +263,10 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1} h1.NegativeSpans = h2.NegativeSpans h1.NegativeBuckets = []int64{0, 1} - expH2 := h2.ToFloat() + expH2 := h2.ToFloat(nil) expH2.CounterResetHint = histogram.NotCounterReset exp := []floatResult{ - {t: ts1, h: h1.ToFloat()}, + {t: ts1, h: h1.ToFloat(nil)}, {t: ts2, h: expH2}, } it := c.Iterator(nil) @@ -347,8 +348,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Greater(t, len(posInterjections), 0) - require.Equal(t, 0, len(negInterjections)) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) require.True(t, ok) // Only new buckets came in. require.False(t, cr) @@ -369,8 +370,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -384,8 +385,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -405,8 +406,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -432,8 +433,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) @@ -809,10 +810,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -833,10 +834,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{6, 3, 3, 2, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -855,10 +856,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -871,10 +872,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -893,10 +894,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -919,12 +920,59 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) } } + +func TestFloatHistogramAppendOnlyErrors(t *testing.T) { + t.Run("schema change error", func(t *testing.T) { + c := Chunk(NewFloatHistogramChunk()) + + // Create fresh appender and add the first histogram. 
+ app, err := c.Appender() + require.NoError(t, err) + + h := tsdbutil.GenerateTestFloatHistogram(0) + var isRecoded bool + c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.NoError(t, err) + + // Add erroring histogram. + h2 := h.Copy() + h2.Schema++ + c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.EqualError(t, err, "float histogram schema change") + }) + t.Run("counter reset error", func(t *testing.T) { + c := Chunk(NewFloatHistogramChunk()) + + // Create fresh appender and add the first histogram. + app, err := c.Appender() + require.NoError(t, err) + + h := tsdbutil.GenerateTestFloatHistogram(0) + var isRecoded bool + c, isRecoded, app, err = app.AppendFloatHistogram(nil, 1, h, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.NoError(t, err) + + // Add erroring histogram. + h2 := h.Copy() + h2.CounterResetHint = histogram.CounterReset + c, isRecoded, _, err = app.AppendFloatHistogram(nil, 2, h2, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.EqualError(t, err, "float histogram counter reset") + }) +} diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index 7c6f07f1f2..ac84e7a1ef 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -640,10 +640,10 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h) if !okToAppend || counterReset { if appendOnly { - if !okToAppend { - return nil, false, a, fmt.Errorf("histogram schema change") + if counterReset { + return nil, false, a, fmt.Errorf("histogram counter reset") } - return nil, false, a, fmt.Errorf("histogram counter reset") + return nil, false, a, fmt.Errorf("histogram schema change") } newChunk := NewHistogramChunk() app, err := newChunk.Appender() diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 25e415da76..53aee89db5 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) type result struct { @@ -97,7 +98,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { chk, _, app, err := app.AppendHistogram(nil, ts, h, false) require.NoError(t, err) require.Nil(t, chk) - exp = append(exp, result{t: ts, h: h, fh: h.ToFloat()}) + exp = append(exp, result{t: ts, h: h, fh: h.ToFloat(nil)}) require.Equal(t, 1, c.NumSamples()) // Add an updated histogram. @@ -113,7 +114,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { require.Nil(t, chk) hExp := h.Copy() hExp.CounterResetHint = histogram.NotCounterReset - exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()}) + exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat(nil)}) require.Equal(t, 2, c.NumSamples()) // Add update with new appender. @@ -132,7 +133,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { require.Nil(t, chk) hExp = h.Copy() hExp.CounterResetHint = histogram.NotCounterReset - exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()}) + exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat(nil)}) require.Equal(t, 3, c.NumSamples()) // 1. Expand iterator in simple case. @@ -256,8 +257,8 @@ func TestHistogramChunkBucketChanges(t *testing.T) { // This is how span changes will be handled. 
hApp, _ := app.(*HistogramAppender) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Greater(t, len(posInterjections), 0) - require.Greater(t, len(negInterjections), 0) + require.NotEmpty(t, posInterjections) + require.NotEmpty(t, negInterjections) require.True(t, ok) // Only new buckets came in. require.False(t, cr) c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans) @@ -277,8 +278,8 @@ func TestHistogramChunkBucketChanges(t *testing.T) { hExp := h2.Copy() hExp.CounterResetHint = histogram.NotCounterReset exp := []result{ - {t: ts1, h: h1, fh: h1.ToFloat()}, - {t: ts2, h: hExp, fh: hExp.ToFloat()}, + {t: ts1, h: h1, fh: h1.ToFloat(nil)}, + {t: ts2, h: hExp, fh: hExp.ToFloat(nil)}, } it := c.Iterator(nil) var act []result @@ -364,8 +365,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Greater(t, len(posInterjections), 0) - require.Equal(t, 0, len(negInterjections)) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) require.True(t, ok) // Only new buckets came in. require.False(t, cr) @@ -385,8 +386,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -400,8 +401,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -424,8 +425,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -454,8 +455,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) @@ -986,10 +987,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1014,10 +1015,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{6, -3, 0, -1, 3, -4} // {6, 3, 3, 2, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1040,10 +1041,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // {6, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1060,10 +1061,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1086,10 +1087,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1116,10 +1117,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // {1, 2, 5, 3, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1129,3 +1130,50 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { require.Equal(t, GaugeType, c.(*HistogramChunk).GetCounterResetHeader()) } } + +func TestHistogramAppendOnlyErrors(t *testing.T) { 
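+ // As with the float variant above, this test pins down the corrected error precedence in AppendHistogram: a counter reset is reported as a counter reset even though it also makes the chunk non-appendable, whereas previously the schema-change message was returned for any non-appendable histogram.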
+ t.Run("schema change error", func(t *testing.T) { + c := Chunk(NewHistogramChunk()) + + // Create fresh appender and add the first histogram. + app, err := c.Appender() + require.NoError(t, err) + + h := tsdbutil.GenerateTestHistogram(0) + var isRecoded bool + c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.NoError(t, err) + + // Add erroring histogram. + h2 := h.Copy() + h2.Schema++ + c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.EqualError(t, err, "histogram schema change") + }) + t.Run("counter reset error", func(t *testing.T) { + c := Chunk(NewHistogramChunk()) + + // Create fresh appender and add the first histogram. + app, err := c.Appender() + require.NoError(t, err) + + h := tsdbutil.GenerateTestHistogram(0) + var isRecoded bool + c, isRecoded, app, err = app.AppendHistogram(nil, 1, h, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.NoError(t, err) + + // Add erroring histogram. + h2 := h.Copy() + h2.CounterResetHint = histogram.CounterReset + c, isRecoded, _, err = app.AppendHistogram(nil, 2, h2, true) + require.Nil(t, c) + require.False(t, isRecoded) + require.EqualError(t, err, "histogram counter reset") + }) +} diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index f22285a0ca..2c6db3637a 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -671,7 +671,7 @@ func (s *Reader) Size() int64 { return s.size } -// Chunk returns a chunk from a given reference. +// ChunkOrIterable returns a chunk from a given reference. func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index 1cdf4a1ddd..ececdb4813 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -111,6 +111,10 @@ func (e *CorruptionErr) Error() string { return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error() } +func (e *CorruptionErr) Unwrap() error { + return e.Err +} + // chunkPos keeps track of the position in the head chunk files. // chunkPos is not thread-safe, a lock must be used to protect it. type chunkPos struct { @@ -400,7 +404,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro buf := make([]byte, MagicChunksSize) size, err := f.Read(buf) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err) } if err := f.Close(); err != nil { @@ -893,7 +897,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding. chkEnc = cdm.RemoveMasks(chkEnc) if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil { - if cerr, ok := err.(*CorruptionErr); ok { + var cerr *CorruptionErr + if errors.As(err, &cerr) { cerr.Dir = cdm.dir.Name() cerr.FileIndex = segID return cerr diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index ae66378bba..acfa7461be 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -128,7 +128,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { } // Checking on-disk bytes for the first file. 
- require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles)) + require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles)) require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers)) actualBytes, err := os.ReadFile(firstFileName) @@ -173,7 +173,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { idx++ return nil })) - require.Equal(t, len(expectedData), idx) + require.Len(t, expectedData, idx) } func TestChunkDiskMapper_WriteUnsupportedChunk_Chunk_IterateChunks(t *testing.T) { @@ -252,7 +252,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) { for _, i := range remainingFiles { _, ok := hrw.mmappedChunkFiles[i] - require.Equal(t, true, ok) + require.True(t, ok) } } @@ -509,7 +509,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { nonEmptyFile() // 2. nonEmptyFile() // 3. - require.Equal(t, 3, len(hrw.mmappedChunkFiles)) + require.Len(t, hrw.mmappedChunkFiles, 3) lastFile := 0 for idx := range hrw.mmappedChunkFiles { if idx > lastFile { @@ -538,7 +538,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { hrw = createChunkDiskMapper(t, dir) // Removed from memory. - require.Equal(t, 3, len(hrw.mmappedChunkFiles)) + require.Len(t, hrw.mmappedChunkFiles, 3) for idx := range hrw.mmappedChunkFiles { require.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file") } @@ -546,7 +546,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { // Removed even from disk. files, err := os.ReadDir(dir) require.NoError(t, err) - require.Equal(t, 3, len(files)) + require.Len(t, files, 3) for _, fi := range files { seq, err := strconv.ParseUint(fi.Name(), 10, 64) require.NoError(t, err) diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go index e678a040ba..5756e45856 100644 --- a/tsdb/chunks/queue_test.go +++ b/tsdb/chunks/queue_test.go @@ -29,33 +29,33 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) { totalSize := 0 for s := q.first; s != nil; s = s.nextSegment { - require.True(t, s.segment != nil) + require.NotNil(t, s.segment) // Next read index is lower than or equal to the next write index (we cannot read past written jobs) - require.True(t, s.nextRead <= s.nextWrite) + require.LessOrEqual(t, s.nextRead, s.nextWrite) // Number of unread elements in this segment. totalSize += s.nextWrite - s.nextRead // First segment can be partially read, other segments were not read yet. if s == q.first { - require.True(t, s.nextRead >= 0) + require.GreaterOrEqual(t, s.nextRead, 0) } else { - require.True(t, s.nextRead == 0) + require.Equal(t, 0, s.nextRead) } // If first shard is empty (everything was read from it already), it must have extra capacity for // additional elements, otherwise it would have been removed. if s == q.first && s.nextRead == s.nextWrite { - require.True(t, s.nextWrite < len(s.segment)) + require.Less(t, s.nextWrite, len(s.segment)) } // Segments in the middle are full. if s != q.first && s != q.last { - require.True(t, s.nextWrite == len(s.segment)) + require.Len(t, s.segment, s.nextWrite) } // Last segment must have at least one element, or we wouldn't have created it.
- require.True(t, s.nextWrite > 0) + require.Greater(t, s.nextWrite, 0) } require.Equal(t, q.size, totalSize) @@ -307,7 +307,7 @@ func TestQueuePushPopManyGoroutines(t *testing.T) { readersWG.Wait() // Check if we have all expected values - require.Equal(t, writeGoroutines*writes, len(refs)) + require.Len(t, refs, writeGoroutines*writes) } func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) { diff --git a/tsdb/compact.go b/tsdb/compact.go index 5c52dfe561..8d5a53b3a3 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -16,6 +16,7 @@ package tsdb import ( "context" "crypto/rand" + "errors" "fmt" "io" "os" @@ -26,7 +27,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -549,7 +549,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, if !errors.Is(err, context.Canceled) { for _, b := range bs { if err := b.setCompactionFailed(); err != nil { - errs.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir())) + errs.Add(fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err)) } } } @@ -791,7 +791,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop var chunkw ChunkWriter chunkw, err = chunks.NewWriterWithSegSize(chunkDir(tmp), c.maxBlockChunkSegmentSize) if err != nil { - return errors.Wrap(err, "open chunk writer") + return fmt.Errorf("open chunk writer: %w", err) } chunkw = newPreventDoubleCloseChunkWriter(chunkw) // We now close chunkWriter in populateBlock, but keep it in the closers here as well. @@ -812,7 +812,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop var indexw IndexWriter indexw, err = index.NewWriter(c.ctx, filepath.Join(tmp, indexFilename)) if err != nil { - return errors.Wrap(err, "open index writer") + return fmt.Errorf("open index writer: %w", err) } indexw = newPreventDoubleCloseIndexWriter(indexw) // We now close indexWriter in populateBlock, but keep it in the closers here as well. closers = append(closers, indexw) @@ -822,7 +822,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop // We use MinTime and MaxTime from first output block, because ALL output blocks have the same min/max times set. if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, c.concurrencyOpts, blocks, outBlocks[0].meta.MinTime, outBlocks[0].meta.MaxTime, outBlocks); err != nil { - return errors.Wrap(err, "populate block") + return fmt.Errorf("populate block: %w", err) } select { @@ -851,17 +851,17 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop } if _, err = writeMetaFile(c.logger, ob.tmpDir, ob.meta); err != nil { - return errors.Wrap(err, "write merged meta") + return fmt.Errorf("write merged meta: %w", err) } // Create an empty tombstones file. 
if _, err := tombstones.WriteFile(c.logger, ob.tmpDir, tombstones.NewMemTombstones()); err != nil { - return errors.Wrap(err, "write new tombstones file") + return fmt.Errorf("write new tombstones file: %w", err) } df, err := fileutil.OpenDir(ob.tmpDir) if err != nil { - return errors.Wrap(err, "open temporary block dir") + return fmt.Errorf("open temporary block dir: %w", err) } defer func() { if df != nil { @@ -870,18 +870,18 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop }() if err := df.Sync(); err != nil { - return errors.Wrap(err, "sync temporary dir file") + return fmt.Errorf("sync temporary dir file: %w", err) } // Close temp dir before rename block dir (for windows platform). if err = df.Close(); err != nil { - return errors.Wrap(err, "close temporary dir") + return fmt.Errorf("close temporary dir: %w", err) } df = nil // Block successfully written, make it visible in destination dir by moving it from tmp one. if err := fileutil.Replace(ob.tmpDir, ob.blockDir); err != nil { - return errors.Wrap(err, "rename block dir") + return fmt.Errorf("rename block dir: %w", err) } } @@ -971,7 +971,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa defer func() { errs := tsdb_errors.NewMulti(err) if cerr := tsdb_errors.CloseAll(closers); cerr != nil { - errs.Add(errors.Wrap(cerr, "close")) + errs.Add(fmt.Errorf("close: %w", cerr)) } err = errs.Err() metrics.PopulatingBlocks.Set(0) @@ -999,19 +999,19 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa indexr, err := b.Index() if err != nil { - return errors.Wrapf(err, "open index reader for block %+v", b.Meta()) + return fmt.Errorf("open index reader for block %+v: %w", b.Meta(), err) } closers = append(closers, indexr) chunkr, err := b.Chunks() if err != nil { - return errors.Wrapf(err, "open chunk reader for block %+v", b.Meta()) + return fmt.Errorf("open chunk reader for block %+v: %w", b.Meta(), err) } closers = append(closers, chunkr) tombsr, err := b.Tombstones() if err != nil { - return errors.Wrapf(err, "open tombstone reader for block %+v", b.Meta()) + return fmt.Errorf("open tombstone reader for block %+v: %w", b.Meta(), err) } closers = append(closers, tombsr) @@ -1048,11 +1048,11 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if len(outBlocks) == 1 { for symbols.Next() { if err := outBlocks[0].indexw.AddSymbol(symbols.At()); err != nil { - return errors.Wrap(err, "add symbol") + return fmt.Errorf("add symbol: %w", err) } } - if symbols.Err() != nil { - return errors.Wrap(symbols.Err(), "next symbol") + if err := symbols.Err(); err != nil { + return fmt.Errorf("next symbol: %w", err) } } else { if err := populateSymbols(ctx, mergeFunc, concurrencyOpts, symbolsSets, outBlocks); err != nil { @@ -1105,8 +1105,8 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa // chunk file purposes. chks = append(chks, chksIter.At()) } - if chksIter.Err() != nil { - return errors.Wrap(chksIter.Err(), "chunk iter") + if err := chksIter.Err(); err != nil { + return fmt.Errorf("chunk iter: %w", err) } // Skip the series with all deleted chunks. 
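// The rewrites in this file all follow one mechanical pattern: dropping
// github.com/pkg/errors in favor of the standard library, with fmt.Errorf
// and the %w verb doing the wrapping. A minimal sketch of the pattern, using
// a hypothetical openIndex helper that is not part of this patch:
//
	indexw, err := openIndex(dir)
	if err != nil {
		// Before: return errors.Wrap(err, "open index writer")
		return fmt.Errorf("open index writer: %w", err)
	}
//
// Errors wrapped with %w still satisfy errors.Is and errors.As, which is why
// the errors.Is(err, context.Canceled) check earlier in this file keeps
// working after the migration.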
@@ -1123,11 +1123,11 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa err := blockWriters[obIx].addSeries(s.Labels(), chks) if err != nil { - return errors.Wrap(err, "adding series") + return fmt.Errorf("adding series: %w", err) } } - if set.Err() != nil { - return errors.Wrap(set.Err(), "iterate compaction set") + if err := set.Err(); err != nil { + return fmt.Errorf("iterate compaction set: %w", err) } for ix := range blockWriters { @@ -1137,7 +1137,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa for ix := range blockWriters { stats, err := blockWriters[ix].waitFinished() if err != nil { - return errors.Wrap(err, "writing block") + return fmt.Errorf("writing block: %w", err) } outBlocks[ix].meta.Stats = stats @@ -1168,7 +1168,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM // and if we only include symbols from series, we would skip it. // It may not be required, but it's small and better be safe than sorry. if err := batchers[ix].addSymbol(""); err != nil { - return errors.Wrap(err, "addSymbol to batcher") + return fmt.Errorf("addSymbol to batcher: %w", err) } } @@ -1191,10 +1191,10 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM err := s.Labels().Validate(func(l labels.Label) error { if err := batchers[obIx].addSymbol(l.Name); err != nil { - return errors.Wrap(err, "addSymbol to batcher") + return fmt.Errorf("addSymbol to batcher: %w", err) } if err := batchers[obIx].addSymbol(l.Value); err != nil { - return errors.Wrap(err, "addSymbol to batcher") + return fmt.Errorf("addSymbol to batcher: %w", err) } return nil }) @@ -1206,13 +1206,13 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM for ix := range outBlocks { // Flush the batcher to write remaining symbols. if err := batchers[ix].flushSymbols(true); err != nil { - return errors.Wrap(err, "flushing batcher") + return fmt.Errorf("flushing batcher: %w", err) } } err := flushers.close() if err != nil { - return errors.Wrap(err, "closing flushers") + return fmt.Errorf("closing flushers: %w", err) } for ix := range outBlocks { @@ -1224,7 +1224,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM it, err := newSymbolsIterator(symbolFiles) if err != nil { - return errors.Wrap(err, "opening symbols iterator") + return fmt.Errorf("opening symbols iterator: %w", err) } // Each symbols iterator must be closed to close underlying files. @@ -1239,12 +1239,12 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM for sym, err = it.NextSymbol(); err == nil; sym, err = it.NextSymbol() { err = outBlocks[ix].indexw.AddSymbol(sym) if err != nil { - return errors.Wrap(err, "AddSymbol") + return fmt.Errorf("AddSymbol: %w", err) } } if err != io.EOF { - return errors.Wrap(err, "iterating symbols") + return fmt.Errorf("iterating symbols: %w", err) } // if err == io.EOF, we have iterated through all symbols. We can close underlying @@ -1256,7 +1256,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM // or compaction fails, because in that case compactor already removes entire (temp) output block directory. 
for _, fn := range symbolFiles {
if err := os.Remove(fn); err != nil {
- return errors.Wrap(err, "deleting symbols file")
+ return fmt.Errorf("deleting symbols file: %w", err)
}
}
}
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 50630caccf..aea02dbfef 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -16,6 +16,7 @@ package tsdb
import (
"context"
crand "crypto/rand"
+ "errors"
"fmt"
"math"
"math/rand"
@@ -28,7 +29,6 @@ import (
"github.com/go-kit/log"
"github.com/oklog/ulid"
- "github.com/pkg/errors"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
"golang.org/x/sync/semaphore"
@@ -1426,7 +1426,7 @@ func TestDisableAutoCompactions(t *testing.T) {
}
require.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
- require.Equal(t, 0, len(db.blocks))
+ require.Empty(t, db.blocks)
// Enable the compaction, trigger it and check that the block is persisted.
db.EnableCompactions()
@@ -1440,7 +1440,7 @@ func TestDisableAutoCompactions(t *testing.T) {
}
time.Sleep(100 * time.Millisecond)
}
- require.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.")
+ require.NotEmpty(t, db.Blocks(), "No block was persisted after the set timeout.")
}
// TestCancelCompactions ensures that when the db is closed
@@ -1463,7 +1463,7 @@ func TestCancelCompactions(t *testing.T) {
{
db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil)
require.NoError(t, err)
- require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
+ require.Len(t, db.Blocks(), 3, "initial block count mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch")
db.compactc <- struct{}{} // Trigger a compaction.
for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 {
@@ -1482,7 +1482,7 @@ func TestCancelCompactions(t *testing.T) {
{
db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil)
require.NoError(t, err)
- require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch")
+ require.Len(t, db.Blocks(), 3, "initial block count mismatch")
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch")
db.compactc <- struct{}{} // Trigger a compaction.
@@ -1495,7 +1495,7 @@
actT := time.Since(start)
expT := timeCompactionUninterrupted / 2 // Closing the db in the middle of compaction should take less than half the time.
- require.True(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT)
+ require.Less(t, actT, expT, "closing the db took longer than expected. exp: <%v, act: %v", expT, actT)
// Make sure that no blocks were marked as compaction failed.
// This checks that the `context.Canceled` error is properly checked at all levels: @@ -1721,8 +1721,8 @@ func TestHeadCompactionWithHistograms(t *testing.T) { for tsMinute := from; tsMinute <= to; tsMinute++ { var err error if floatTest { - _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat()) - efh := h.ToFloat() + _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat(nil)) + efh := h.ToFloat(nil) if tsMinute == from { efh.CounterResetHint = histogram.UnknownCounterReset } else { diff --git a/tsdb/db.go b/tsdb/db.go index c1e7757adb..da8f42fe2b 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -16,6 +16,7 @@ package tsdb import ( "context" + "errors" "fmt" "io" "io/fs" @@ -30,7 +31,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -440,7 +440,7 @@ type DBReadOnly struct { // OpenDBReadOnly opens DB in the given directory for read only operations. func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { - return nil, errors.Wrap(err, "opening the db dir") + return nil, fmt.Errorf("opening the db dir: %w", err) } if l == nil { @@ -461,7 +461,7 @@ func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { blockReaders, err := db.Blocks() if err != nil { - return errors.Wrap(err, "read blocks") + return fmt.Errorf("read blocks: %w", err) } maxBlockTime := int64(math.MinInt64) if len(blockReaders) > 0 { @@ -486,15 +486,16 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { return err } defer func() { - returnErr = tsdb_errors.NewMulti( - returnErr, - errors.Wrap(head.Close(), "closing Head"), - ).Err() + errs := tsdb_errors.NewMulti(returnErr) + if err := head.Close(); err != nil { + errs.Add(fmt.Errorf("closing Head: %w", err)) + } + returnErr = errs.Err() }() // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. if err := head.Init(maxBlockTime); err != nil { - return errors.Wrap(err, "read WAL") + return fmt.Errorf("read WAL: %w", err) } mint := head.MinTime() maxt := head.MaxTime() @@ -509,12 +510,15 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { false, ) if err != nil { - return errors.Wrap(err, "create leveled compactor") + return fmt.Errorf("create leveled compactor: %w", err) } // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. _, err = compactor.Write(dir, rh, mint, maxt+1, nil) - return errors.Wrap(err, "writing WAL") + if err != nil { + return fmt.Errorf("writing WAL: %w", err) + } + return nil } func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) { @@ -573,7 +577,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. if err := head.Init(maxBlockTime); err != nil { - return nil, errors.Wrap(err, "read WAL") + return nil, fmt.Errorf("read WAL: %w", err) } // Set the wal to nil to disable all wal operations. // This is mainly to avoid blocking when closing the head. 
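// One subtlety in the deferred-close rewrite above: errors.Wrap returned nil
// when handed a nil error, so the old code could pass head.Close()'s result
// to NewMulti unconditionally. fmt.Errorf always returns a non-nil error, so
// the wrap must now be guarded by an explicit nil check. A minimal sketch of
// the shape, with closer standing in for any io.Closer:
//
	defer func() {
		errs := tsdb_errors.NewMulti(returnErr)
		if err := closer.Close(); err != nil {
			errs.Add(fmt.Errorf("closing: %w", err))
		}
		returnErr = errs.Err()
	}()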
@@ -635,7 +639,9 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { } errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + if err != nil { + errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err)) + } } return nil, errs.Err() } @@ -819,7 +825,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs // Fixup bad format written by Prometheus 2.1. if err := repairBadIndexVersion(l, dir); err != nil { - return nil, errors.Wrap(err, "repair bad index version") + return nil, fmt.Errorf("repair bad index version: %w", err) } walDir := filepath.Join(dir, "wal") @@ -827,12 +833,12 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs // Migrate old WAL if one exists. if err := MigrateWAL(l, walDir); err != nil { - return nil, errors.Wrap(err, "migrate WAL") + return nil, fmt.Errorf("migrate WAL: %w", err) } for _, tmpDir := range []string{walDir, dir} { // Remove tmp dirs. if err := removeBestEffortTmpDirs(l, tmpDir); err != nil { - return nil, errors.Wrap(err, "remove tmp dirs") + return nil, fmt.Errorf("remove tmp dirs: %w", err) } } @@ -855,11 +861,11 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } close(db.donec) // DB is never run if it was an error, so close this channel here. - - returnedErr = tsdb_errors.NewMulti( - returnedErr, - errors.Wrap(db.Close(), "close DB after failed startup"), - ).Err() + errs := tsdb_errors.NewMulti(returnedErr) + if err := db.Close(); err != nil { + errs.Add(fmt.Errorf("close DB after failed startup: %w", err)) + } + returnedErr = errs.Err() }() if db.blocksToDelete == nil { @@ -881,7 +887,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil, opts.AllowOverlappingCompaction) if err != nil { cancel() - return nil, errors.Wrap(err, "create leveled compactor") + return nil, fmt.Errorf("create leveled compactor: %w", err) } db.compactCancel = cancel @@ -969,17 +975,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if initErr := db.head.Init(minValidTime); initErr != nil { db.head.metrics.walCorruptionsTotal.Inc() - e, ok := initErr.(*errLoadWbl) - if ok { + var e *errLoadWbl + if errors.As(initErr, &e) { level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { - return nil, errors.Wrap(err, "repair corrupted WBL") + return nil, fmt.Errorf("repair corrupted WBL: %w", err) } level.Info(db.logger).Log("msg", "Successfully repaired WBL") } else { level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") + return nil, fmt.Errorf("repair corrupted WAL: %w", err) } level.Info(db.logger).Log("msg", "Successfully repaired WAL") } @@ -1195,10 +1201,11 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { lastBlockMaxt := int64(math.MinInt64) defer func() { - returnErr = tsdb_errors.NewMulti( - returnErr, - errors.Wrap(db.head.truncateWAL(lastBlockMaxt), "WAL truncation in Compact defer"), - ).Err() + errs := tsdb_errors.NewMulti(returnErr) + if err := db.head.truncateWAL(lastBlockMaxt); err != nil { + errs.Add(fmt.Errorf("WAL truncation in Compact defer: %w", err)) 
+ }
+ returnErr = errs.Err()
}()
start := time.Now()
@@ -1232,7 +1239,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) {
db.head.WaitForAppendersOverlapping(rh.MaxTime())
if err := db.compactHead(rh); err != nil {
- return errors.Wrap(err, "compact head")
+ return fmt.Errorf("compact head: %w", err)
}
// Consider only successful compactions for WAL truncation.
lastBlockMaxt = maxt
@@ -1241,7 +1248,7 @@
// Clear some disk space before compacting blocks, especially important
// when Head compaction happened over a long time range.
if err := db.head.truncateWAL(lastBlockMaxt); err != nil {
- return errors.Wrap(err, "WAL truncation in Compact")
+ return fmt.Errorf("WAL truncation in Compact: %w", err)
}
compactionDuration := time.Since(start)
@@ -1256,7 +1263,7 @@
if lastBlockMaxt != math.MinInt64 {
// The head was compacted, so we compact OOO head as well.
if err := db.compactOOOHead(ctx); err != nil {
- return errors.Wrap(err, "compact ooo head")
+ return fmt.Errorf("compact ooo head: %w", err)
}
}
@@ -1269,11 +1276,11 @@ func (db *DB) CompactHead(head *RangeHead) error {
defer db.cmtx.Unlock()
if err := db.compactHead(head); err != nil {
- return errors.Wrap(err, "compact head")
+ return fmt.Errorf("compact head: %w", err)
}
if err := db.head.truncateWAL(head.BlockMaxTime()); err != nil {
- return errors.Wrap(err, "WAL truncation")
+ return fmt.Errorf("WAL truncation: %w", err)
}
return nil
}
@@ -1292,12 +1299,12 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
}
oooHead, err := NewOOOCompactionHead(ctx, db.head)
if err != nil {
- return errors.Wrap(err, "get ooo compaction head")
+ return fmt.Errorf("get ooo compaction head: %w", err)
}
ulids, err := db.compactOOO(db.dir, oooHead)
if err != nil {
- return errors.Wrap(err, "compact ooo head")
+ return fmt.Errorf("compact ooo head: %w", err)
}
if err := db.reloadBlocks(); err != nil {
errs := tsdb_errors.NewMulti(err)
@@ -1306,7 +1313,7 @@
errs.Add(errRemoveAll)
}
}
- return errors.Wrap(errs.Err(), "reloadBlocks blocks after failed compact ooo head")
+ return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errs.Err())
}
lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef()
@@ -1326,7 +1333,7 @@
}
if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil {
- return errors.Wrap(err, "truncate ooo wbl")
+ return fmt.Errorf("truncate ooo wbl: %w", err)
}
}
@@ -1362,12 +1369,12 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID
blockDir := filepath.Join(dest, uid.String())
meta, _, err := readMetaFile(blockDir)
if err != nil {
- return ulids, errors.Wrap(err, "read meta")
+ return ulids, fmt.Errorf("read meta: %w", err)
}
meta.Compaction.SetOutOfOrder()
_, err = writeMetaFile(db.logger, blockDir, meta)
if err != nil {
- return ulids, errors.Wrap(err, "write meta")
+ return ulids, fmt.Errorf("write meta: %w", err)
}
}
}
@@ -1393,20 +1400,20 @@ func (db *DB) compactHead(head *RangeHead) error {
uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
if err != nil {
- return errors.Wrap(err, "persist head block")
+ return fmt.Errorf("persist head block: %w", err)
}
if err := db.reloadBlocks(); err != nil {
if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
return tsdb_errors.NewMulti(
- errors.Wrap(err, "reloadBlocks blocks"),
- errors.Wrapf(errRemoveAll, "delete persisted head block after failed db reloadBlocks:%s", uid),
+ fmt.Errorf("reloadBlocks blocks: %w", err),
+ fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll),
).Err()
}
- return errors.Wrap(err, "reloadBlocks blocks")
+ return fmt.Errorf("reloadBlocks blocks: %w", err)
}
if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
- return errors.Wrap(err, "head memory truncate")
+ return fmt.Errorf("head memory truncate: %w", err)
}
return nil
}
@@ -1418,7 +1425,7 @@ func (db *DB) compactBlocks() (err error) {
for {
plan, err := db.compactor.Plan(db.dir)
if err != nil {
- return errors.Wrap(err, "plan compaction")
+ return fmt.Errorf("plan compaction: %w", err)
}
if len(plan) == 0 {
break
@@ -1432,14 +1439,14 @@
uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
if err != nil {
- return errors.Wrapf(err, "compact %s", plan)
+ return fmt.Errorf("compact %s: %w", plan, err)
}
if err := db.reloadBlocks(); err != nil {
if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
- return errors.Wrapf(err, "delete compacted block after failed db reloadBlocks:%s", uid)
+ return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err)
}
- return errors.Wrap(err, "reloadBlocks blocks")
+ return fmt.Errorf("reloadBlocks blocks: %w", err)
}
}
@@ -1460,14 +1467,14 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) {
// reload reloads blocks and truncates the head and its WAL.
func (db *DB) reload() error {
if err := db.reloadBlocks(); err != nil {
- return errors.Wrap(err, "reloadBlocks")
+ return fmt.Errorf("reloadBlocks: %w", err)
}
maxt, ok := db.inOrderBlocksMaxTime()
if !ok {
return nil
}
if err := db.head.Truncate(maxt); err != nil {
- return errors.Wrap(err, "head truncate")
+ return fmt.Errorf("head truncate: %w", err)
}
return nil
}
@@ -1521,7 +1528,9 @@ func (db *DB) reloadBlocks() (err error) {
}
errs := tsdb_errors.NewMulti()
for ulid, err := range corrupted {
- errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
+ if err != nil {
+ errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
+ }
}
return errs.Err()
}
@@ -1573,7 +1582,7 @@
}
}
if err := db.deleteBlocks(deletable); err != nil {
- return errors.Wrapf(err, "delete %v blocks", len(deletable))
+ return fmt.Errorf("delete %v blocks: %w", len(deletable), err)
}
return nil
}
@@ -1581,7 +1590,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
bDirs, err := blockDirs(dir)
if err != nil {
- return nil, nil, errors.Wrap(err, "find blocks")
+ return nil, nil, fmt.Errorf("find blocks: %w", err)
}
corrupted = make(map[ulid.ULID]error)
@@ -1720,16 +1729,16 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
// Noop.
continue case err != nil: - return errors.Wrapf(err, "stat dir %v", toDelete) + return fmt.Errorf("stat dir %v: %w", toDelete, err) } // Replace atomically to avoid partial block when process would crash during deletion. tmpToDelete := filepath.Join(db.dir, fmt.Sprintf("%s%s", ulid, tmpForDeletionBlockDirSuffix)) if err := fileutil.Replace(toDelete, tmpToDelete); err != nil { - return errors.Wrapf(err, "replace of obsolete block for deletion %s", ulid) + return fmt.Errorf("replace of obsolete block for deletion %s: %w", ulid, err) } if err := os.RemoveAll(tmpToDelete); err != nil { - return errors.Wrapf(err, "delete obsolete block %s", ulid) + return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) } @@ -1937,7 +1946,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { - return errors.Wrapf(err, "error snapshotting block: %s", b.Dir()) + return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) } } if !withHead { @@ -1950,7 +1959,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil { - return errors.Wrap(err, "snapshot head block") + return fmt.Errorf("snapshot head block: %w", err) } return nil } @@ -1985,7 +1994,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { var err error inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block querier for head %s", rh) + return nil, fmt.Errorf("open block querier for head %s: %w", rh, err) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -1994,7 +2003,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { if err := inOrderHeadQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head block querier %s", rh) + return nil, fmt.Errorf("closing head block querier %s: %w", rh, err) } inOrderHeadQuerier = nil } @@ -2002,7 +2011,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { rh := NewRangeHead(db.head, newMint, maxt) inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) + return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } } @@ -2019,7 +2028,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. 
rh.isoState.Close() - return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) + return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) } blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) @@ -2028,7 +2037,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for block %s", b) + return nil, fmt.Errorf("open querier for block %s: %w", b, err) } blockQueriers = append(blockQueriers, q) } @@ -2066,7 +2075,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewRangeHead(db.head, mint, maxt) inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head %s", rh) + return nil, fmt.Errorf("open querier for head %s: %w", rh, err) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -2075,7 +2084,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { if err := inOrderHeadQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head querier %s", rh) + return nil, fmt.Errorf("closing head querier %s: %w", rh, err) } inOrderHeadQuerier = nil } @@ -2083,7 +2092,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewRangeHead(db.head, newMint, maxt) inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) + return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } } @@ -2096,7 +2105,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) + return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) } blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) @@ -2105,7 +2114,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for block %s", b) + return nil, fmt.Errorf("open querier for block %s: %w", b, err) } blockQueriers = append(blockQueriers, q) } @@ -2184,7 +2193,7 @@ func (db *DB) CleanTombstones() (err error) { for _, pb := range db.Blocks() { uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) if cleanErr != nil { - return errors.Wrapf(cleanErr, "clean tombstones: %s", pb.Dir()) + return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr) } if !safeToDelete { // There was nothing to clean. 
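// Wrapping with %w also changes how callers inspect errors, which is why the
// open() hunk earlier in this file replaces a direct type assertion on
// initErr with errors.As: a type assertion only matches the outermost error
// value, while errors.As walks the entire unwrap chain. A minimal sketch,
// reusing this file's *errLoadWbl type:
//
	var e *errLoadWbl
	if errors.As(initErr, &e) {
		// e was found somewhere in initErr's chain; e.err is the
		// underlying WBL error handed to wbl.Repair.
	}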
@@ -2212,7 +2221,10 @@ func (db *DB) CleanTombstones() (err error) {
level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
}
}
- return errors.Wrap(err, "reload blocks")
+ if err != nil {
+ return fmt.Errorf("reload blocks: %w", err)
+ }
+ return nil
}
}
return nil
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index d8dfeb5420..1bd62d8b27 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -34,7 +34,6 @@ import (
"github.com/go-kit/log"
"github.com/oklog/ulid"
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
@@ -134,7 +133,7 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
result[name] = samples
}
require.NoError(t, ss.Err())
- require.Equal(t, 0, len(ss.Warnings()))
+ require.Empty(t, ss.Warnings())
return result
}
@@ -184,7 +183,7 @@ func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Match
result[name] = chks
}
require.NoError(t, ss.Err())
- require.Equal(t, 0, len(ss.Warnings()))
+ require.Empty(t, ss.Warnings())
return result
}
@@ -207,7 +206,7 @@ func TestDB_reloadOrder(t *testing.T) {
require.NoError(t, db.reloadBlocks())
blocks := db.Blocks()
- require.Equal(t, 3, len(blocks))
+ require.Len(t, blocks, 3)
require.Equal(t, metas[1].MinTime, blocks[0].Meta().MinTime)
require.Equal(t, metas[1].MaxTime, blocks[0].Meta().MaxTime)
require.Equal(t, metas[0].MinTime, blocks[1].Meta().MinTime)
@@ -362,7 +361,7 @@ func TestDBAppenderAddRef(t *testing.T) {
// Missing labels & invalid refs should fail.
_, err = app2.Append(9999999, labels.EmptyLabels(), 1, 1)
- require.Equal(t, ErrInvalidSample, errors.Cause(err))
+ require.ErrorIs(t, err, ErrInvalidSample)
require.NoError(t, app2.Commit())
@@ -478,7 +477,7 @@ Outer:
require.Equal(t, eok, rok)
if !eok {
- require.Equal(t, 0, len(res.Warnings()))
+ require.Empty(t, res.Warnings())
continue Outer
}
sexp := expss.At()
@@ -526,7 +525,7 @@ func TestAmendHistogramDatapointCausesError(t *testing.T) {
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
}
- fh := h.ToFloat()
+ fh := h.ToFloat(nil)
app = db.Appender(ctx)
_, err = app.AppendHistogram(0, labels.FromStrings("a", "c"), 0, h.Copy(), nil)
@@ -686,7 +685,7 @@ func TestDB_Snapshot(t *testing.T) {
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
- require.Equal(t, 0, len(seriesSet.Warnings()))
+ require.Empty(t, seriesSet.Warnings())
require.Equal(t, 1000.0, sum)
}
@@ -735,7 +734,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
- require.Equal(t, 0, len(seriesSet.Warnings()))
+ require.Empty(t, seriesSet.Warnings())
// We snapshotted with MaxTime - 10, so expect 10 fewer samples.
require.Equal(t, 1000.0-10, sum)
@@ -811,7 +810,7 @@ Outer:
require.Equal(t, eok, rok)
if !eok {
- require.Equal(t, 0, len(res.Warnings()))
+ require.Empty(t, res.Warnings())
continue Outer
}
sexp := expss.At()
@@ -979,7 +978,7 @@ func TestDB_e2e(t *testing.T) {
}
require.NoError(t, ss.Err())
- require.Equal(t, 0, len(ss.Warnings()))
+ require.Empty(t, ss.Warnings())
require.Equal(t, expected, result)
q.Close()
@@ -1011,7 +1010,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
values, ws, err := q.LabelValues(ctx, "labelname")
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
}
@@ -1048,7 +1047,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
files = append(files, fi)
}
}
- require.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
+ require.NotEmpty(t, files, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
@@ -1261,7 +1260,7 @@ func TestTombstoneClean(t *testing.T) {
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
- require.Equal(t, 0, len(res.Warnings()))
+ require.Empty(t, res.Warnings())
for _, b := range db.Blocks() {
require.Equal(t, tombstones.NewMemTombstones(), b.tombstones)
@@ -1309,7 +1308,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
// After cleaning tombstones that cover the entire block, no blocks should be left behind.
actualBlockDirs, err := blockDirs(db.dir)
require.NoError(t, err)
- require.Equal(t, 0, len(actualBlockDirs))
+ require.Empty(t, actualBlockDirs)
}
// TestTombstoneCleanFail tests that a failing TombstoneClean doesn't leave any blocks behind.
@@ -1355,7 +1354,7 @@ func TestTombstoneCleanFail(t *testing.T) {
require.NoError(t, err)
// Only one block should have been replaced by a new block.
require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
- require.Equal(t, len(intersection(oldBlockDirs, actualBlockDirs)), len(actualBlockDirs)-1)
+ require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
}
// TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation
@@ -1647,9 +1646,9 @@ func TestSizeRetention(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
- require.Equal(t, actSize, expSize, "metric db size doesn't match actual disk size")
+ require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
- require.Equal(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
+ require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
}
@@ -1673,7 +1672,7 @@ func TestSizeRetentionMetric(t *testing.T) {
}()
actMaxBytes := int64(prom_testutil.ToFloat64(db.metrics.maxBytes))
- require.Equal(t, actMaxBytes, c.expMaxBytes, "metric retention limit bytes mismatch")
+ require.Equal(t, c.expMaxBytes, actMaxBytes, "metric retention limit bytes mismatch")
}
}
@@ -1746,7 +1745,7 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
ss := q.Select(ctx, false, nil, c.selector...)
lres, _, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, c.series, lres)
}
}
@@ -1779,7 +1778,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
}
- require.Equal(t, 0, len(OverlappingBlocks(metas)), "we found unexpected overlaps")
+ require.Empty(t, OverlappingBlocks(metas), "we found unexpected overlaps")
// Add overlapping blocks. We have to establish order again since we aren't interested
// in trivial overlaps caused by unorderedness.
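// The assertion rewrites in these test files are driven by the testifylint
// linter enabled elsewhere in this patch: each check moves to the most
// specific require helper, so a failure prints the offending value instead of
// a bare boolean or length. A few representative shapes, with xs standing in
// for any slice or map:
//
	require.Equal(t, 0, len(xs))   // becomes: require.Empty(t, xs)
	require.Equal(t, 3, len(xs))   // becomes: require.Len(t, xs, 3)
	require.Greater(t, len(xs), 0) // becomes: require.NotEmpty(t, xs)
	require.Equal(t, true, ok)     // becomes: require.True(t, ok)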
@@ -2078,7 +2077,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
- require.Equal(t, 0, len(actBlocks))
+ require.Empty(t, actBlocks)
require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here")
})
@@ -2098,7 +2097,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
- require.Equal(t, 0, len(actBlocks))
+ require.Empty(t, actBlocks)
app = db.Appender(ctx)
_, err = app.Append(0, defaultLabel, 1, 0)
@@ -2119,7 +2118,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err = blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
- require.Equal(t, 1, len(actBlocks), "No blocks created when compacting with >0 samples")
+ require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples")
})
t.Run(`When no new block is created from head, and there are some blocks on disk
@@ -2151,8 +2150,8 @@ func TestNoEmptyBlocks(t *testing.T) {
}
oldBlocks := db.Blocks()
- require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
- require.Equal(t, len(blocks)+len(oldBlocks), len(db.Blocks())) // Ensure all blocks are registered.
+ require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
+ require.Len(t, db.Blocks(), len(blocks)+len(oldBlocks)) // Ensure all blocks are registered.
require.NoError(t, db.Delete(ctx, math.MinInt64, math.MaxInt64, defaultMatcher))
require.NoError(t, db.Compact(ctx))
require.Equal(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here once for each block that has tombstones")
@@ -2160,7 +2159,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Equal(t, len(db.Blocks()), len(actBlocks))
- require.Equal(t, 1, len(actBlocks), "All samples are deleted. Only the most recent block should remain after compaction.")
+ require.Len(t, actBlocks, 1, "All samples are deleted. Only the most recent block should remain after compaction.")
})
}
@@ -2261,7 +2260,7 @@ func TestDB_LabelNames(t *testing.T) {
var ws annotations.Annotations
labelNames, ws, err = q.LabelNames(ctx)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.NoError(t, q.Close())
require.Equal(t, tst.exp2, labelNames)
}
@@ -2290,7 +2289,7 @@ func TestCorrectNumTombstones(t *testing.T) {
err := db.Compact(ctx)
require.NoError(t, err)
- require.Equal(t, 1, len(db.blocks))
+ require.Len(t, db.blocks, 1)
require.NoError(t, db.Delete(ctx, 0, 1, defaultMatcher))
require.Equal(t, uint64(1), db.blocks[0].meta.Stats.NumTombstones)
@@ -2353,7 +2352,7 @@ func TestBlockRanges(t *testing.T) {
}
time.Sleep(100 * time.Millisecond)
}
- require.Equal(t, 2, len(db.Blocks()), "no new block created after the set timeout")
+ require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
if db.Blocks()[0].Meta().MaxTime > db.Blocks()[1].Meta().MinTime {
t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
@@ -2381,7 +2380,7 @@ func TestBlockRanges(t *testing.T) {
require.NoError(t, err)
defer db.Close()
- require.Equal(t, 3, len(db.Blocks()), "db doesn't include expected number of blocks")
+ require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
require.Equal(t, db.Blocks()[2].Meta().MaxTime, thirdBlockMaxt, "unexpected maxt of the last block")
app = db.Appender(ctx)
@@ -2395,7 +2394,7 @@ func TestBlockRanges(t *testing.T) {
time.Sleep(100 * time.Millisecond)
}
- require.Equal(t, 4, len(db.Blocks()), "no new block created after the set timeout")
+ require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
if db.Blocks()[2].Meta().MaxTime > db.Blocks()[3].Meta().MinTime {
t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
@@ -2576,7 +2575,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
defer func() { require.NoError(t, db.Close()) }()
blocks, err := db.Blocks()
require.NoError(t, err)
- require.Equal(t, len(blocks), 1)
+ require.Len(t, blocks, 1)
querier, err := db.Querier(0, int64(maxt)-1)
require.NoError(t, err)
@@ -2596,7 +2595,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
require.NoError(t, series.Err())
}
require.NoError(t, seriesSet.Err())
- require.Equal(t, 0, len(seriesSet.Warnings()))
+ require.Empty(t, seriesSet.Warnings())
require.Equal(t, 1000.0, sum)
}
@@ -2655,7 +2654,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) {
ss := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
values := map[float64]struct{}{}
for _, series := range seriesSet {
@@ -2699,13 +2698,13 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
ss := querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err := expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, map[string][]sample{}, seriesSet)
ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, map[string][]sample{}, seriesSet)
// This commit is after the queriers are created, so should not be returned.
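// The h.ToFloat() to h.ToFloat(nil) edits scattered through these tests track
// a signature change in the histogram package: ToFloat now accepts an
// optional *FloatHistogram destination. The reuse semantics sketched below
// are as understood from this patch, not spelled out in it:
//
	fh := h.ToFloat(nil) // nil destination: allocate a fresh FloatHistogram.
	fh = h.ToFloat(fh)   // non-nil: reuse fh's memory instead of allocating.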
@@ -2716,14 +2715,14 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
ss = querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, map[string][]sample{}, seriesSet)
// Series exists but has no samples for querier created after Add.
ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, map[string][]sample{`{foo="bar"}`: {}}, seriesSet)
querierAfterCommit, err := db.Querier(0, 1000000)
@@ -2734,7 +2733,7 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
ss = querierAfterCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err = expandSeriesSet(ss)
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet)
}
@@ -2881,7 +2880,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
files, err := os.ReadDir(tempDir)
require.NoError(t, err)
- require.Equal(t, test.expSegmentsCount, len(files), "expected segments count mismatch")
+ require.Len(t, files, test.expSegmentsCount, "expected segments count mismatch")
// Verify that all data is written to the segments.
sizeExp := 0
@@ -3031,7 +3030,7 @@ func TestCompactHead(t *testing.T) {
require.NoError(t, deleteNonBlocks(db.Dir()))
db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
require.NoError(t, err)
- require.Equal(t, 1, len(db.Blocks()))
+ require.Len(t, db.Blocks(), 1)
require.Equal(t, int64(maxt), db.Head().MinTime())
defer func() { require.NoError(t, db.Close()) }()
querier, err := db.Querier(0, int64(maxt)-1)
@@ -3193,7 +3192,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
}
loaded++
}
- require.Equal(t, len(expectedLoadedDirs), loaded)
+ require.Len(t, expectedLoadedDirs, loaded)
require.NoError(t, db.Close())
files, err := os.ReadDir(tmpDir)
@@ -3208,7 +3207,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
ignored++
}
}
- require.Equal(t, len(expectedIgnoredDirs), ignored)
+ require.Len(t, expectedIgnoredDirs, ignored)
_, err = os.Stat(tmpCheckpointDir)
require.True(t, os.IsNotExist(err))
_, err = os.Stat(tmpChunkSnapshotDir)
@@ -3261,7 +3260,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
// As the data spans for 59 blocks, 58 go to disk and 1 remains in Head.
- require.Equal(t, 58, len(db.Blocks()))
+ require.Len(t, db.Blocks(), 58)
// Though WAL was truncated only once, head should be truncated after each compaction.
require.Equal(t, 58.0, prom_testutil.ToFloat64(db.head.metrics.headTruncateTotal))
@@ -3294,7 +3293,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
db.DisableCompactions()
// 1 block more.
- require.Equal(t, 59, len(db.Blocks()))
+ require.Len(t, db.Blocks(), 59)
// No series in Head because of this new block.
require.Equal(t, 0, int(db.head.NumSeries()))
@@ -3319,7 +3318,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
// No new blocks should be created as there was no data between the new samples and the blocks.
- require.Equal(t, 59, len(db.Blocks())) + require.Len(t, db.Blocks(), 59) // The compaction should have only truncated first 2/3 of WAL (while also rotating the files). first, last, err = wlog.Segments(db.head.wal.Dir()) @@ -3456,7 +3455,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t iterators = append(iterators, it) } require.NoError(t, seriesSet.Err()) - require.Equal(t, actualSeries, numSeries) + require.Equal(t, numSeries, actualSeries) // Compact the TSDB head again. require.NoError(t, db.Compact(ctx)) @@ -3590,7 +3589,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun } } require.NoError(t, seriesSet.Err()) - require.Equal(t, actualSeries, numSeries) + require.Equal(t, numSeries, actualSeries) // Compact the TSDB head again. require.NoError(t, db.Compact(ctx)) @@ -4113,9 +4112,9 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) { numSamples := int(170*time.Minute/time.Millisecond) / int(itvl) addSamples(numSamples) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.Compact(ctx)) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) // Restarting. require.NoError(t, db.Close()) @@ -4128,7 +4127,7 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) { numSamples = int(20*time.Minute/time.Millisecond) / int(itvl) addSamples(numSamples) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.Compact(ctx)) require.Len(t, db.Blocks(), 1) @@ -4474,13 +4473,13 @@ func TestOOOCompaction(t *testing.T) { require.NoError(t, err) require.False(t, created) require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0) - require.Equal(t, 14, len(ms.ooo.oooMmappedChunks)) // 7 original, 7 duplicate. + require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate. } checkNonEmptyOOOChunk(series1) checkNonEmptyOOOChunk(series2) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // There is a 0th WBL file. require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows @@ -4496,7 +4495,7 @@ func TestOOOCompaction(t *testing.T) { require.NoError(t, db.CompactOOOHead(ctx)) // 3 blocks exist now. [0, 120), [120, 240), [240, 360) - require.Equal(t, len(db.Blocks()), 3) + require.Len(t, db.Blocks(), 3) verifyDBSamples() // Blocks created out of OOO head now. @@ -4548,7 +4547,7 @@ func TestOOOCompaction(t *testing.T) { // Since this is a forced compaction, this block is not aligned with 2h. err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds())) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), 4) // [0, 120), [120, 240), [240, 360), [250, 351) + require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351) verifySamples(db.Blocks()[3], 250, 350) verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged. @@ -4563,7 +4562,7 @@ func TestOOOCompaction(t *testing.T) { // This will merge overlapping block. require.NoError(t, db.Compact(ctx)) - require.Equal(t, len(db.Blocks()), 3) // [0, 120), [120, 240), [240, 360) + require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360) verifySamples(db.Blocks()[0], 90, 119) verifySamples(db.Blocks()[1], 120, 239) verifySamples(db.Blocks()[2], 240, 350) // Merged block. 
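// A second class of fixes in these OOO tests is argument order: testify's
// convention is require.Equal(t, expected, actual), and several calls, such
// as require.Equal(t, len(db.Blocks()), 0), had the computed value in the
// expected slot, which makes failure output misleading. Moving to the
// dedicated helpers sidesteps the ordering question entirely:
//
	require.Equal(t, len(db.Blocks()), 0) // swapped: the actual value sits in the expected slot
	require.Empty(t, db.Blocks())         // no expected/actual order to get wrong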
@@ -4619,19 +4618,19 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) { // If the normal Head is not compacted, the OOO head compaction does not take place. require.NoError(t, db.Compact(ctx)) - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Add more in-order samples in future that would trigger the compaction. addSamples(400, 450) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Compacts normal and OOO head. require.NoError(t, db.Compact(ctx)) // 2 blocks exist now. [0, 120), [250, 360) - require.Equal(t, len(db.Blocks()), 2) + require.Len(t, db.Blocks(), 2) require.Equal(t, int64(0), db.Blocks()[0].MinTime()) require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime()) @@ -4720,19 +4719,19 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) { // If the normal Head is not compacted, the OOO head compaction does not take place. require.NoError(t, db.Compact(ctx)) - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Add more in-order samples in future that would trigger the compaction. addSamples(400, 450) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Compacts normal and OOO head. require.NoError(t, db.Compact(ctx)) // 2 blocks exist now. [0, 120), [250, 360) - require.Equal(t, len(db.Blocks()), 2) + require.Len(t, db.Blocks(), 2) require.Equal(t, int64(0), db.Blocks()[0].MinTime()) require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime()) @@ -4816,7 +4815,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Equal(t, 2, len(ms.ooo.oooMmappedChunks)) + require.Len(t, ms.ooo.oooMmappedChunks, 2) require.NotNil(t, ms.ooo.oooHeadChunk) } @@ -4835,7 +4834,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Equal(t, 2, len(ms.ooo.oooMmappedChunks)) + require.Len(t, ms.ooo.oooMmappedChunks, 2) require.Equal(t, 109*time.Minute.Milliseconds(), ms.ooo.oooMmappedChunks[1].maxTime) require.Nil(t, ms.ooo.oooHeadChunk) // Because of missing wbl. } @@ -4864,9 +4863,9 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { verifySamples(90, 109) // Compaction should also work fine. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.CompactOOOHead(ctx)) - require.Equal(t, len(db.Blocks()), 1) // One block from OOO data. + require.Len(t, db.Blocks(), 1) // One block from OOO data. 
require.Equal(t, int64(0), db.Blocks()[0].MinTime()) require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) @@ -4885,7 +4884,6 @@ func Test_Querier_OOOQuery(t *testing.T) { opts := DefaultOptions() opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() - opts.AllowOverlappingCompaction = false series1 := labels.FromStrings("foo", "bar1") @@ -4959,7 +4957,7 @@ func Test_Querier_OOOQuery(t *testing.T) { seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) require.NotNil(t, seriesSet[series1.String()]) - require.Equal(t, 1, len(seriesSet)) + require.Len(t, seriesSet, 1) require.Equal(t, expSamples, seriesSet[series1.String()]) require.GreaterOrEqual(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch") }) @@ -4970,7 +4968,6 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { opts := DefaultOptions() opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() - opts.AllowOverlappingCompaction = false series1 := labels.FromStrings("foo", "bar1") @@ -5044,7 +5041,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { chks := queryChunks(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) require.NotNil(t, chks[series1.String()]) - require.Equal(t, 1, len(chks)) + require.Len(t, chks, 1) require.Equal(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch") var gotSamples []chunks.Sample for _, chunk := range chks[series1.String()] { @@ -5319,7 +5316,7 @@ func TestWBLAndMmapReplay(t *testing.T) { s1MmapSamples = append(s1MmapSamples, sample{t: ts, f: val}) } } - require.Greater(t, len(s1MmapSamples), 0) + require.NotEmpty(t, s1MmapSamples) require.NoError(t, db.Close()) @@ -5468,7 +5465,7 @@ func TestOOOCompactionFailure(t *testing.T) { addSample(90, 310) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // There is a 0th WBL file. verifyFirstWBLFileIs0 := func(count int) { @@ -5501,7 +5498,7 @@ func TestOOOCompactionFailure(t *testing.T) { for i := 0; i < 5; i++ { require.Error(t, db.CompactOOOHead(ctx)) } - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // M-map files don't change after failed compaction. verifyMmapFiles("000001") @@ -5512,7 +5509,7 @@ func TestOOOCompactionFailure(t *testing.T) { db.compactor = originalCompactor require.NoError(t, db.CompactOOOHead(ctx)) oldBlocks := db.Blocks() - require.Equal(t, len(db.Blocks()), 3) + require.Len(t, db.Blocks(), 3) // Check that the ooo chunks were removed. ms, created, err := db.head.getOrCreate(series1.Hash(), series1) @@ -5523,7 +5520,7 @@ func TestOOOCompactionFailure(t *testing.T) { // The failed compaction should not have left the ooo Head corrupted. // Hence, expect no new blocks with another OOO compaction call. require.NoError(t, db.CompactOOOHead(ctx)) - require.Equal(t, len(db.Blocks()), 3) + require.Len(t, db.Blocks(), 3) require.Equal(t, oldBlocks, db.Blocks()) // There should be a single m-map file @@ -5565,7 +5562,7 @@ func TestOOOCompactionFailure(t *testing.T) { // Since this is a forced compaction, this block is not aligned with 2h. 
err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds())) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), 4) // [0, 120), [120, 240), [240, 360), [250, 351) + require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351) verifySamples(db.Blocks()[3], 250, 350) // The compaction also clears out the old m-map files. Including @@ -5910,9 +5907,9 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { require.NoError(t, err) require.Greater(t, size, int64(0)) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.compactOOOHead(ctx)) - require.Greater(t, len(db.Blocks()), 0) + require.NotEmpty(t, db.Blocks()) // WBL is empty. size, err = db.head.wbl.Size() @@ -5932,7 +5929,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO of 59m old fails. s := addSamples(t, db, 251, 260, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) oldWblPtr := fmt.Sprintf("%p", db.head.wbl) @@ -5967,7 +5964,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO of 49m old fails. s := addSamples(t, db, 261, 270, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) // WBL does not change. newWblPtr := fmt.Sprintf("%p", db.head.wbl) @@ -5998,7 +5995,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO fails. s := addSamples(t, db, 251, 260, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) require.Nil(t, db.head.wbl) @@ -6035,7 +6032,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO within old time window fails. s := addSamples(t, db, 290, 309, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) // WBL does not change and is not removed. newWblPtr := fmt.Sprintf("%p", db.head.wbl) @@ -6057,7 +6054,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO fails. s := addSamples(t, db, 290, 309, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) require.Nil(t, db.head.wbl) @@ -6067,7 +6064,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO still fails. s = addSamples(t, db, 290, 309, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) require.Nil(t, db.head.wbl) }) @@ -6327,7 +6324,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) { db.DisableCompactions() ms := db.head.series.getByHash(series1.Hash(), series1) - require.Greater(t, len(ms.ooo.oooMmappedChunks), 0, "OOO mmap chunk was not replayed") + require.NotEmpty(t, ms.ooo.oooMmappedChunks, "OOO mmap chunk was not replayed") checkMmapFileContents := func(contains, notContains []string) { mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot) @@ -6399,8 +6396,8 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { var err error app := db.Appender(ctx) if floatHistogram { - _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat()) - efh := h.ToFloat() + _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat(nil)) + efh := h.ToFloat(nil) efh.CounterResetHint = expCRH *exp = append(*exp, sample{t: minute(tsMinute), fh: efh}) } else { @@ -6654,7 +6651,6 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { t.Helper() opts := DefaultOptions() - opts.AllowOverlappingCompaction = true // TODO(jesusvazquez): This replaced AllowOverlappingBlocks, make sure that works. 
db := openTestDB(t, opts, nil) t.Cleanup(func() { require.NoError(t, db.Close()) @@ -6691,7 +6687,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { } } - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.reload()) require.Len(t, db.Blocks(), len(blockSeries)) @@ -6821,20 +6817,20 @@ func TestNativeHistogramFlag(t *testing.T) { // Disabled by default. _, err = app.AppendHistogram(0, l, 100, h, nil) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) - _, err = app.AppendHistogram(0, l, 105, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, 105, nil, h.ToFloat(nil)) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) // Enable and append. db.EnableNativeHistograms() _, err = app.AppendHistogram(0, l, 200, h, nil) require.NoError(t, err) - _, err = app.AppendHistogram(0, l, 205, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, 205, nil, h.ToFloat(nil)) require.NoError(t, err) db.DisableNativeHistograms() _, err = app.AppendHistogram(0, l, 300, h, nil) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) - _, err = app.AppendHistogram(0, l, 305, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, 305, nil, h.ToFloat(nil)) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) require.NoError(t, app.Commit()) @@ -6843,7 +6839,7 @@ func TestNativeHistogramFlag(t *testing.T) { require.NoError(t, err) act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) require.Equal(t, map[string][]chunks.Sample{ - l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat()}}, + l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat(nil)}}, }, act) } diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go index 6a8e72f049..ff230c44b1 100644 --- a/tsdb/errors/errors.go +++ b/tsdb/errors/errors.go @@ -25,7 +25,7 @@ import ( type multiError []error // NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { +func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return. m := multiError{} m.Add(errs...) return m diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 8eaf42653c..805de70da3 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "sync" "unicode/utf8" @@ -363,7 +364,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp err := ce.validateExemplar(seriesLabels, e, true) if err != nil { - if err == storage.ErrDuplicateExemplar { + if errors.Is(err, storage.ErrDuplicateExemplar) { // Duplicate exemplar, noop. 
return nil } diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index 64121cbfc5..24b46e0fae 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -88,7 +88,7 @@ func TestAddExemplar(t *testing.T) { } require.NoError(t, es.AddExemplar(l, e)) - require.Equal(t, es.index[string(l.Bytes(nil))].newest, 0, "exemplar was not stored correctly") + require.Equal(t, 0, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly") e2 := exemplar.Exemplar{ Labels: labels.FromStrings("traceID", "zxcvb"), @@ -97,7 +97,7 @@ func TestAddExemplar(t *testing.T) { } require.NoError(t, es.AddExemplar(l, e2)) - require.Equal(t, es.index[string(l.Bytes(nil))].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update") + require.Equal(t, 1, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly, location of newest exemplar for series in index did not update") require.True(t, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar) require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar") @@ -145,7 +145,7 @@ func TestStorageOverflow(t *testing.T) { require.NoError(t, err, "error creating label matcher for exemplar query") ret, err := es.Select(100, 110, []*labels.Matcher{m}) require.NoError(t, err) - require.True(t, len(ret) == 1, "select should have returned samples for a single series only") + require.Len(t, ret, 1, "select should have returned samples for a single series only") require.True(t, reflect.DeepEqual(eList[1:], ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", eList[1:], ret[0].Exemplars) } @@ -171,7 +171,7 @@ func TestSelectExemplar(t *testing.T) { require.NoError(t, err, "error creating label matcher for exemplar query") ret, err := es.Select(0, 100, []*labels.Matcher{m}) require.NoError(t, err) - require.True(t, len(ret) == 1, "select should have returned samples for a single series only") + require.Len(t, ret, 1, "select should have returned samples for a single series only") expectedResult := []exemplar.Exemplar{e} require.True(t, reflect.DeepEqual(expectedResult, ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", expectedResult, ret[0].Exemplars) @@ -209,15 +209,15 @@ func TestSelectExemplar_MultiSeries(t *testing.T) { require.NoError(t, err, "error creating label matcher for exemplar query") ret, err := es.Select(100, 200, []*labels.Matcher{m}) require.NoError(t, err) - require.True(t, len(ret) == 1, "select should have returned samples for a single series only") - require.True(t, len(ret[0].Exemplars) == 3, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars)) + require.Len(t, ret, 1, "select should have returned samples for a single series only") + require.Len(t, ret[0].Exemplars, 3, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars)) m, err = labels.NewMatcher(labels.MatchEqual, labels.MetricName, l1Name) require.NoError(t, err, "error creating label matcher for exemplar query") ret, err = es.Select(100, 200, []*labels.Matcher{m}) require.NoError(t, err) - require.True(t, len(ret) == 1, "select should have returned samples for a single series only") - require.True(t, len(ret[0].Exemplars) == 2, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars)) + require.Len(t, ret, 1, 
"select should have returned samples for a single series only") + require.Len(t, ret[0].Exemplars, 2, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars)) } func TestSelectExemplar_TimeRange(t *testing.T) { @@ -243,8 +243,8 @@ func TestSelectExemplar_TimeRange(t *testing.T) { require.NoError(t, err, "error creating label matcher for exemplar query") ret, err := es.Select(102, 104, []*labels.Matcher{m}) require.NoError(t, err) - require.True(t, len(ret) == 1, "select should have returned samples for a single series only") - require.True(t, len(ret[0].Exemplars) == 3, "didn't get expected two exemplars %d, %+v", len(ret[0].Exemplars), ret) + require.Len(t, ret, 1, "select should have returned samples for a single series only") + require.Len(t, ret[0].Exemplars, 3, "didn't get expected two exemplars %d, %+v", len(ret[0].Exemplars), ret) } // Test to ensure that even though a series matches more than one matcher from the @@ -281,7 +281,7 @@ func TestSelectExemplar_DuplicateSeries(t *testing.T) { ret, err := es.Select(0, 100, m...) require.NoError(t, err) - require.True(t, len(ret) == 1, "select should have returned samples for a single series only") + require.Len(t, ret, 1, "select should have returned samples for a single series only") } func TestIndexOverwrite(t *testing.T) { diff --git a/tsdb/head.go b/tsdb/head.go index 7d3b759e08..279a086c06 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -15,18 +15,19 @@ package tsdb import ( "context" + "errors" "fmt" "io" "math" "path/filepath" "runtime" + "strconv" "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" @@ -124,7 +125,8 @@ type Head struct { cardinalityMutex sync.Mutex cardinalityCache *index.PostingsStats // Posting stats cache which will expire after 30sec. - lastPostingsStatsCall time.Duration // Last posting stats call (PostingsCardinalityStats()) time for caching. + cardinalityCacheKey string + lastPostingsStatsCall time.Duration // Last posting stats call (PostingsCardinalityStats()) time for caching. // chunkDiskMapper is used to write and read Head chunks to/from disk. chunkDiskMapper chunkDiskMapper @@ -163,6 +165,10 @@ type HeadOptions struct { // EnableNativeHistograms enables the ingestion of native histograms. EnableNativeHistograms atomic.Bool + // EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample. + // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md + EnableCreatedTimestampZeroIngestion bool + ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string @@ -658,11 +664,11 @@ func (h *Head) Init(minValidTime int64) error { if h.wal != nil { _, endAt, err := wlog.Segments(h.wal.Dir()) if err != nil { - return errors.Wrap(err, "finding WAL segments") + return fmt.Errorf("finding WAL segments: %w", err) } _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) - if err != nil && err != record.ErrNotFound { + if err != nil && !errors.Is(err, record.ErrNotFound) { level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) } @@ -709,7 +715,8 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { // TODO(codesome): clear out all m-map chunks here for refSeries. 
level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) - if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { h.metrics.mmapChunkCorruptionTotal.Inc() } @@ -736,14 +743,14 @@ func (h *Head) Init(minValidTime int64) error { checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. dir, startFrom, err := wlog.LastCheckpoint(h.wal.Dir()) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "find last checkpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("find last checkpoint: %w", err) } // Find the last segment. _, endAt, e := wlog.Segments(h.wal.Dir()) if e != nil { - return errors.Wrap(e, "finding WAL segments") + return fmt.Errorf("finding WAL segments: %w", e) } h.startWALReplayStatus(startFrom, endAt) @@ -752,7 +759,7 @@ func (h *Head) Init(minValidTime int64) error { if err == nil && startFrom >= snapIdx { sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return errors.Wrap(err, "open checkpoint") + return fmt.Errorf("open checkpoint: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -763,7 +770,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. if err := h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { - return errors.Wrap(err, "backfill checkpoint") + return fmt.Errorf("backfill checkpoint: %w", err) } h.updateWALReplayStatusRead(startFrom) startFrom++ @@ -780,7 +787,7 @@ func (h *Head) Init(minValidTime int64) error { for i := startFrom; i <= endAt; i++ { s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i)) if err != nil { - return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + return fmt.Errorf("open WAL segment: %d: %w", i, err) } offset := 0 @@ -793,7 +800,7 @@ func (h *Head) Init(minValidTime int64) error { continue } if err != nil { - return errors.Wrapf(err, "segment reader (offset=%d)", offset) + return fmt.Errorf("segment reader (offset=%d): %w", offset, err) } err = h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { @@ -812,14 +819,14 @@ func (h *Head) Init(minValidTime int64) error { // Replay WBL. startFrom, endAt, e = wlog.Segments(h.wbl.Dir()) if e != nil { - return &errLoadWbl{errors.Wrap(e, "finding WBL segments")} + return &errLoadWbl{fmt.Errorf("finding WBL segments: %w", e)} } h.startWALReplayStatus(startFrom, endAt) for i := startFrom; i <= endAt; i++ { s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i)) if err != nil { - return &errLoadWbl{errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i))} + return &errLoadWbl{fmt.Errorf("open WBL segment: %d: %w", i, err)} } sr := wlog.NewSegmentBufReader(s) @@ -940,7 +947,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) return nil }); err != nil { // secondLastRef because the lastRef caused an error. - return nil, nil, secondLastRef, errors.Wrap(err, "iterate on on-disk chunks") + return nil, nil, secondLastRef, fmt.Errorf("iterate on on-disk chunks: %w", err) } return mmappedChunks, oooMmappedChunks, lastRef, nil } @@ -1029,16 +1036,23 @@ func (h *Head) DisableNativeHistograms() { // PostingsCardinalityStats returns highest cardinality stats by label and value names. 
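// The stats cache below is now keyed on both the label name and the limit,
// so a cached result is reused only for an identical query; changing either
// parameter evicts the cache even before cardinalityCacheExpirationTime
// passes. The key is a simple string concatenation:
//
//	cacheKey := statsByLabelName + ";" + strconv.Itoa(limit)
//	// "__name__;10" and "__name__;1" are distinct cache entries.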
func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { + cacheKey := statsByLabelName + ";" + strconv.Itoa(limit) + h.cardinalityMutex.Lock() defer h.cardinalityMutex.Unlock() - currentTime := time.Duration(time.Now().Unix()) * time.Second - seconds := currentTime - h.lastPostingsStatsCall - if seconds > cardinalityCacheExpirationTime { + if h.cardinalityCacheKey != cacheKey { h.cardinalityCache = nil + } else { + currentTime := time.Duration(time.Now().Unix()) * time.Second + seconds := currentTime - h.lastPostingsStatsCall + if seconds > cardinalityCacheExpirationTime { + h.cardinalityCache = nil + } } if h.cardinalityCache != nil { return h.cardinalityCache } + h.cardinalityCacheKey = cacheKey h.cardinalityCache = h.postings.Stats(statsByLabelName, limit) h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second @@ -1252,12 +1266,12 @@ func (h *Head) truncateWAL(mint int64) error { first, last, err := wlog.Segments(h.wal.Dir()) if err != nil { - return errors.Wrap(err, "get segment range") + return fmt.Errorf("get segment range: %w", err) } // Start a new segment, so low ingestion volume TSDB don't have more WAL than // needed. if _, err := h.wal.NextSegment(); err != nil { - return errors.Wrap(err, "next segment") + return fmt.Errorf("next segment: %w", err) } last-- // Never consider last segment for checkpoint. if last < 0 { @@ -1284,10 +1298,11 @@ func (h *Head) truncateWAL(mint int64) error { h.metrics.checkpointCreationTotal.Inc() if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { h.metrics.checkpointCreationFail.Inc() - if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok { + var cerr *wlog.CorruptionErr + if errors.As(err, &cerr) { h.metrics.walCorruptionsTotal.Inc() } - return errors.Wrap(err, "create checkpoint") + return fmt.Errorf("create checkpoint: %w", err) } if err := h.wal.Truncate(last + 1); err != nil { // If truncating fails, we'll just try again at the next checkpoint. @@ -1380,7 +1395,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { // Truncate the chunk m-mapper. if err := h.chunkDiskMapper.Truncate(uint32(minMmapFile)); err != nil { - return errors.Wrap(err, "truncate chunks.HeadReadWriter by file number") + return fmt.Errorf("truncate chunks.HeadReadWriter by file number: %w", err) } return nil } @@ -1495,13 +1510,13 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match p, err := ir.PostingsForMatchers(ctx, false, ms...)
if err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } var stones []tombstones.Stone for p.Next() { if err := ctx.Err(); err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } series := h.series.getByID(chunks.HeadSeriesRef(p.At())) @@ -1523,8 +1538,8 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match if p.Err() != nil { return p.Err() } - if ctx.Err() != nil { - return errors.Wrap(err, "select series") + if err := ctx.Err(); err != nil { + return fmt.Errorf("select series: %w", err) } if h.wal != nil { diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 779e28f236..9f058f0638 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -15,11 +15,11 @@ package tsdb import ( "context" + "errors" "fmt" "math" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -87,6 +87,17 @@ func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m return a.app.UpdateMetadata(ref, l, m) } +func (a *initAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendCTZeroSample(ref, lset, t, ct) + } + + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendCTZeroSample(ref, lset, t, ct) +} + // initTime initializes a head with the first timestamp. This only needs to be called // for a completely fresh head with an empty WAL. func (h *Head) initTime(t int64) { @@ -326,28 +337,11 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { - // Ensure no empty labels have gotten through. - lset = lset.WithoutEmpty() - if lset.IsEmpty() { - return 0, errors.Wrap(ErrInvalidSample, "empty labelset") - } - - if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) - } - - var created bool var err error - s, created, err = a.head.getOrCreate(lset.Hash(), lset) + s, err = a.getOrCreate(lset) if err != nil { return 0, err } - if created { - a.series = append(a.series, record.RefSeries{ - Ref: s.ref, - Labels: lset, - }) - } } if value.IsStaleNaN(v) { @@ -371,10 +365,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) } if err != nil { - switch err { - case storage.ErrOutOfOrderSample: + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() - case storage.ErrTooOldSample: + case errors.Is(err, storage.ErrTooOldSample): a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc() } return 0, err @@ -396,6 +390,71 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 return storage.SeriesRef(s.ref), nil } +// AppendCTZeroSample appends synthetic zero sample for ct timestamp. It returns +// error when sample can't be appended. See +// storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. 
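// A minimal usage sketch (hypothetical labels and millisecond timestamps),
// mirroring TestHeadAppender_AppendCTZeroSample further down: the synthetic
// zero sample is written at the created timestamp, then the real sample at
// the scrape timestamp.
//
//	app := head.Appender(ctx)
//	lbls := labels.FromStrings("foo", "bar")
//	ref, _ := app.AppendCTZeroSample(0, lbls, 60000, 1000) // 0 value at t=1000
//	_, _ = app.Append(ref, lbls, 60000, 10.0)              // real sample at t=60000
//	_ = app.Commit()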
+func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { + if ct >= t { + return 0, fmt.Errorf("CT is newer than or equal to the sample's timestamp, ignoring") + } + + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + var err error + s, err = a.getOrCreate(lset) + if err != nil { + return 0, err + } + } + + // Check whether the CT would be OOO vs. samples we might already have for this series. + // NOTE(bwplotka): This will often be hit, as long-living + // counters are expected to share the same CT. + s.Lock() + isOOO, _, err := s.appendable(ct, 0, a.headMaxt, a.minValidTime, a.oooTimeWindow) + if err == nil { + s.pendingCommit = true + } + s.Unlock() + if err != nil { + return 0, err + } + if isOOO { + return storage.SeriesRef(s.ref), storage.ErrOutOfOrderCT + } + + if ct > a.maxt { + a.maxt = ct + } + a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0}) + a.sampleSeries = append(a.sampleSeries, s) + return storage.SeriesRef(s.ref), nil +} + +func (a *headAppender) getOrCreate(lset labels.Labels) (*memSeries, error) { + // Ensure no empty labels have gotten through. + lset = lset.WithoutEmpty() + if lset.IsEmpty() { + return nil, fmt.Errorf("empty labelset: %w", ErrInvalidSample) + } + if l, dup := lset.HasDuplicateLabelNames(); dup { + return nil, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) + } + var created bool + var err error + s, created, err := a.head.getOrCreate(lset.Hash(), lset) + if err != nil { + return nil, err + } + if created { + a.series = append(a.series, record.RefSeries{ + Ref: s.ref, + Labels: lset, + }) + } + return s, nil +} + // appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) // The sample belongs to the out of order chunk if we return true and no error. // An error signifies the sample cannot be handled. @@ -505,7 +564,7 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, err := a.head.exemplars.ValidateExemplar(s.lset, e) if err != nil { - if err == storage.ErrDuplicateExemplar || err == storage.ErrExemplarsDisabled { + if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) { // Duplicate, don't return an error but don't accept the exemplar. return 0, nil } @@ -544,11 +603,11 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // Ensure no empty labels have gotten through.
lset = lset.WithoutEmpty() if lset.IsEmpty() { - return 0, errors.Wrap(ErrInvalidSample, "empty labelset") + return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample) } if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) } var created bool @@ -576,7 +635,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels s.Lock() if err := s.appendableHistogram(t, h); err != nil { s.Unlock() - if err == storage.ErrOutOfOrderSample { + if errors.Is(err, storage.ErrOutOfOrderSample) { a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err @@ -593,7 +652,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels s.Lock() if err := s.appendableFloatHistogram(t, fh); err != nil { s.Unlock() - if err == storage.ErrOutOfOrderSample { + if errors.Is(err, storage.ErrOutOfOrderSample) { a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err @@ -677,7 +736,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } } if len(a.metadata) > 0 { @@ -685,7 +744,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log metadata") + return fmt.Errorf("log metadata: %w", err) } } if len(a.samples) > 0 { @@ -693,21 +752,21 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log samples") + return fmt.Errorf("log samples: %w", err) } } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log histograms") + return fmt.Errorf("log histograms: %w", err) } } if len(a.floatHistograms) > 0 { rec = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log float histograms") + return fmt.Errorf("log float histograms: %w", err) } } // Exemplars should be logged after samples (float/native histogram/etc), @@ -719,7 +778,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log exemplars") + return fmt.Errorf("log exemplars: %w", err) } } return nil @@ -748,7 +807,7 @@ func (a *headAppender) Commit() (err error) { if err := a.log(); err != nil { _ = a.Rollback() // Most likely the same error will happen again. - return errors.Wrap(err, "write to WAL") + return fmt.Errorf("write to WAL: %w", err) } if a.head.writeNotified != nil { @@ -766,7 +825,7 @@ func (a *headAppender) Commit() (err error) { } // We don't instrument exemplar appends here, all is instrumented by storage. if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { - if err == storage.ErrOutOfOrderExemplar { + if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) @@ -846,16 +905,16 @@ func (a *headAppender) Commit() (err error) { series.Lock() oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch err { - case nil: + switch { + case err == nil: // Do nothing. 
- case storage.ErrOutOfOrderSample: + case errors.Is(err, storage.ErrOutOfOrderSample): samplesAppended-- oooRejected++ - case storage.ErrOutOfBounds: + case errors.Is(err, storage.ErrOutOfBounds): samplesAppended-- oobRejected++ - case storage.ErrTooOldSample: + case errors.Is(err, storage.ErrTooOldSample): samplesAppended-- tooOldRejected++ default: @@ -986,7 +1045,7 @@ func (a *headAppender) Commit() (err error) { for i, m := range a.metadata { series = a.metadataSeries[i] series.Lock() - series.meta = &metadata.Metadata{Type: record.ToTextparseMetricType(m.Type), Unit: m.Unit, Help: m.Help} + series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} series.Unlock() } @@ -1462,7 +1521,7 @@ func (s *memSeries) mmapChunks(chunkDiskMapper chunkDiskMapper) (count int) { } func handleChunkWriteError(err error) { - if err != nil && err != chunks.ErrChunkDiskMapperClosed { + if err != nil && !errors.Is(err, chunks.ErrChunkDiskMapperClosed) { panic(err) } } diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index 8fdf94db0e..a037948100 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -14,10 +14,10 @@ package tsdb import ( + "errors" "strconv" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/atomic" diff --git a/tsdb/head_read.go b/tsdb/head_read.go index d62e03adb4..cc4ac973c0 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -15,11 +15,12 @@ package tsdb import ( "context" + "errors" + "fmt" "math" "sync" "github.com/go-kit/log/level" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -137,7 +138,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { } } if err := p.Err(); err != nil { - return index.ErrPostings(errors.Wrap(err, "expand postings")) + return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } slices.SortFunc(series, func(a, b *memSeries) int { @@ -417,7 +418,8 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPo if ix < len(s.mmappedChunks) { chk, err := cdm.Chunk(s.mmappedChunks[ix].ref) if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { panic(err) } return nil, false, false, err @@ -545,14 +547,15 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm chunkDiskMapper, mint, xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime) } if err != nil { - return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") + return nil, fmt.Errorf("failed to convert ooo head chunk to xor chunk: %w", err) } iterable = xor } else { chk, err := cdm.Chunk(c.ref) if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { - return nil, errors.Wrap(err, "invalid ooo mmapped chunk") + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { + return nil, fmt.Errorf("invalid ooo mmapped chunk: %w", err) } return nil, err } diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go index 4384096d85..5d0b1acc75 100644 --- a/tsdb/head_read_test.go +++ b/tsdb/head_read_test.go @@ -152,7 +152,7 @@ func TestBoundedChunk(t *testing.T) { // it.Next() should keep returning no value. 
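// testify's require.Equal takes the expected value first and the actual
// value second, so these rewrites also swap arguments that were in reverse
// order, e.g.:
//
//	require.Equal(t, chunkenc.ValNone, it.Next()) // expected, then actual
//	require.Equal(t, 1, s.headChunks.len())       // was: require.Equal(t, s.headChunks.len(), 1)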
for i := 0; i < 10; i++ { - require.True(t, it.Next() == chunkenc.ValNone) + require.Equal(t, chunkenc.ValNone, it.Next()) } require.Equal(t, tc.expSamples, samples) @@ -216,8 +216,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "firstChunkID > ix", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.firstChunkID = 5 @@ -229,8 +229,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=0 on memSeries with no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -241,8 +241,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=1 on memSeries with no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -253,8 +253,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=10 on memSeries with no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -267,7 +267,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong 
maxTime on first headChunks element") }, @@ -280,7 +280,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -293,7 +293,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -306,7 +306,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.headChunks = nil @@ -320,7 +320,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.headChunks = nil @@ -334,7 +334,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.headChunks = nil @@ -348,7 +348,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") cdm.Close() @@ -362,7 +362,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, 
s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") cdm.Close() @@ -374,8 +374,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=0 on memSeries with 3 head chunks and no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange*3, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -386,8 +386,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=1 on memSeries with 3 head chunks and no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange*3, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -398,8 +398,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=10 on memSeries with 3 head chunks and no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange*3, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -412,10 +412,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -429,10 +429,10 @@ func TestMemSeries_chunk(t *testing.T) { 
appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -446,10 +446,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -463,10 +463,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -480,10 +480,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -498,10 +498,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, 
s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2e0a4ca88d..69dae42d0a 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -30,9 +30,9 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -698,7 +698,7 @@ func TestHead_ReadWAL(t *testing.T) { require.NoError(t, err) e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")}) require.NoError(t, err) - require.Equal(t, e[0].Exemplars[0], exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")}) + require.Equal(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")}, e[0].Exemplars[0]) }) } } @@ -1088,12 +1088,12 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) { } else { require.Nil(t, series.headChunks, "head chunk is present") } - require.Equal(t, tc.mmappedChunks, len(series.mmappedChunks), "wrong number of mmapped chunks") + require.Len(t, series.mmappedChunks, tc.mmappedChunks, "wrong number of mmapped chunks") truncated := series.truncateChunksBefore(tc.truncateBefore, 0) require.Equal(t, tc.expectedTruncated, truncated, "wrong number of truncated chunks returned") - require.Equal(t, tc.expectedMmap, len(series.mmappedChunks), "wrong number of mmappedChunks after truncation") + require.Len(t, series.mmappedChunks, tc.expectedMmap, "wrong number of mmappedChunks after truncation") if tc.expectedHead > 0 { require.NotNil(t, series.headChunks, "headChunks should is nil after truncation") @@ -1258,7 +1258,7 @@ func TestHeadDeleteSimple(t *testing.T) { if !eok { require.NoError(t, h.Close()) require.NoError(t, actSeriesSet.Err()) - require.Equal(t, 0, len(actSeriesSet.Warnings())) + require.Empty(t, actSeriesSet.Warnings()) continue Outer } expSeries := expSeriesSet.At() @@ -1306,7 +1306,7 @@ func TestDeleteUntilCurMax(t *testing.T) { for res.Next() { } require.NoError(t, res.Err()) - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) // Add again and test for presence. 
app = hb.Appender(context.Background()) @@ -1325,7 +1325,7 @@ func TestDeleteUntilCurMax(t *testing.T) { for res.Next() { } require.NoError(t, res.Err()) - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) } func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { @@ -1526,7 +1526,7 @@ func TestDelete_e2e(t *testing.T) { require.Equal(t, smplExp, smplRes) } require.NoError(t, ss.Err()) - require.Equal(t, 0, len(ss.Warnings())) + require.Empty(t, ss.Warnings()) } } } @@ -1657,7 +1657,7 @@ func TestMemSeries_append(t *testing.T) { require.False(t, chunkCreated, "second sample should use same chunk") s.mmapChunks(chunkDiskMapper) - require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") + require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") @@ -1725,7 +1725,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.False(t, chunkCreated, "second sample should use same chunk") s.mmapChunks(chunkDiskMapper) - require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") + require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") @@ -1736,7 +1736,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") s.mmapChunks(chunkDiskMapper) - require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") + require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") @@ -1836,7 +1836,7 @@ func TestGCChunkAccess(t *testing.T) { require.NoError(t, idx.Series(1, &builder, &chunks)) require.Equal(t, labels.FromStrings("a", "1"), builder.Labels()) - require.Equal(t, 2, len(chunks)) + require.Len(t, chunks, 2) cr, err := h.chunksRange(0, 1500, nil) require.NoError(t, err) @@ -1895,7 +1895,7 @@ func TestGCSeriesAccess(t *testing.T) { require.NoError(t, idx.Series(1, &builder, &chunks)) require.Equal(t, labels.FromStrings("a", "1"), builder.Labels()) - require.Equal(t, 2, len(chunks)) + require.Len(t, chunks, 2) cr, err := h.chunksRange(0, 2000, nil) require.NoError(t, err) @@ -1937,11 +1937,11 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { defer q.Close() ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1")) - require.Equal(t, true, ss.Next()) + require.True(t, ss.Next()) for ss.Next() { } require.NoError(t, ss.Err()) - require.Equal(t, 0, len(ss.Warnings())) + require.Empty(t, ss.Warnings()) } func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { @@ -1966,8 +1966,8 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { require.NoError(t, err) ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1")) - require.Equal(t, false, ss.Next()) - require.Equal(t, 0, 
len(ss.Warnings())) + require.False(t, ss.Next()) + require.Empty(t, ss.Warnings()) require.NoError(t, q.Close()) // Truncate again, this time the series should be deleted @@ -1990,7 +1990,7 @@ func TestHead_LogRollback(t *testing.T) { require.NoError(t, app.Rollback()) recs := readTestWAL(t, w.Dir()) - require.Equal(t, 1, len(recs)) + require.Len(t, recs, 1) series, ok := recs[0].([]record.RefSeries) require.True(t, ok, "expected series record but got %+v", recs[0]) @@ -2060,9 +2060,8 @@ func TestWalRepair_DecodingError(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal)) initErr := h.Init(math.MinInt64) - err = errors.Cause(initErr) // So that we can pick up errors even if wrapped. - _, corrErr := err.(*wlog.CorruptionErr) - require.True(t, corrErr, "reading the wal didn't return corruption error") + var cerr *wlog.CorruptionErr + require.ErrorAs(t, initErr, &cerr, "reading the wal didn't return corruption error") require.NoError(t, h.Close()) // Head will close the wal as well. } @@ -2133,12 +2132,11 @@ func TestWblRepair_DecodingError(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal)) initErr := h.Init(math.MinInt64) - _, ok := initErr.(*errLoadWbl) - require.True(t, ok) // Wbl errors are wrapped into errLoadWbl, make sure we can unwrap it. + var elb *errLoadWbl + require.ErrorAs(t, initErr, &elb) // Wbl errors are wrapped into errLoadWbl, make sure we can unwrap it. - err = errors.Cause(initErr) // So that we can pick up errors even if wrapped. - _, corrErr := err.(*wlog.CorruptionErr) - require.True(t, corrErr, "reading the wal didn't return corruption error") + var cerr *wlog.CorruptionErr + require.ErrorAs(t, initErr, &cerr, "reading the wal didn't return corruption error") require.NoError(t, h.Close()) // Head will close the wal as well. } @@ -2214,7 +2212,7 @@ func TestHeadReadWriterRepair(t *testing.T) { // take effect without another chunk being written. files, err := os.ReadDir(mmappedChunksDir(dir)) require.NoError(t, err) - require.Equal(t, 6, len(files)) + require.Len(t, files, 6) // Corrupt the 4th file by writing a random byte to series ref. f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666) @@ -2240,7 +2238,7 @@ func TestHeadReadWriterRepair(t *testing.T) { { files, err := os.ReadDir(mmappedChunksDir(dir)) require.NoError(t, err) - require.Equal(t, 3, len(files)) + require.Len(t, files, 3) } } @@ -2326,7 +2324,7 @@ func TestMemSeriesIsolation(t *testing.T) { ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err := expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) for _, series := range seriesSet { return int(series[len(series)-1].f) @@ -2730,8 +2728,8 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { require.NoError(t, err) } require.NoError(t, app.Commit()) - require.Equal(t, head.MinTime(), firstSeriesTimestamp) - require.Equal(t, head.MaxTime(), lastSeriesTimestamp) + require.Equal(t, firstSeriesTimestamp, head.MinTime()) + require.Equal(t, lastSeriesTimestamp, head.MaxTime()) testCases := []struct { name string @@ -3075,7 +3073,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) { for benchIdx := 0; benchIdx < b.N; benchIdx++ { actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", matchers...) 
require.NoError(b, err) - require.Equal(b, 9, len(actualValues)) + require.Len(b, actualValues, 9) } } @@ -3184,7 +3182,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) { defer wg.Done() // Compacting head while the querier spans the compaction time. require.NoError(t, db.Compact(ctx)) - require.Greater(t, len(db.Blocks()), 0) + require.NotEmpty(t, db.Blocks()) }() // Give enough time for compaction to finish. @@ -3247,7 +3245,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) { defer wg.Done() // Compacting head while the querier spans the compaction time. require.NoError(t, db.Compact(ctx)) - require.Greater(t, len(db.Blocks()), 0) + require.NotEmpty(t, db.Blocks()) }() // Give enough time for compaction to finish. @@ -3976,7 +3974,7 @@ func TestSnapshotError(t *testing.T) { require.NotNil(t, head.series.getByHash(lbls.Hash(), lbls)) tm, err := head.tombstones.Get(1) require.NoError(t, err) - require.NotEqual(t, 0, len(tm)) + require.NotEmpty(t, tm) head.opts.EnableMemorySnapshotOnShutdown = true require.NoError(t, head.Close()) // This will create a snapshot. @@ -4008,7 +4006,7 @@ func TestSnapshotError(t *testing.T) { require.Nil(t, head.series.getByHash(lbls.Hash(), lbls)) tm, err = head.tombstones.Get(1) require.NoError(t, err) - require.Equal(t, 0, len(tm)) + require.Empty(t, tm) } func TestHistogramMetrics(t *testing.T) { @@ -4151,8 +4149,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) { var err error if floatHistogram { - _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat()) - expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat()}) + _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat(nil)) + expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)}) } else { _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h, nil) expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h}) @@ -4173,8 +4171,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { s := head.series.getByHash(l.Hash(), l) require.NotNil(t, s) require.NotNil(t, s.headChunks) - require.Equal(t, s.headChunks.len(), 1) - require.Equal(t, 0, len(s.mmappedChunks)) + require.Equal(t, 1, s.headChunks.len()) + require.Empty(t, s.mmappedChunks) testQuery(1) // Adding stale in different appender and continuing series after a stale sample. 
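// Histogram.ToFloat now takes a destination *FloatHistogram; every call site
// in these tests passes nil to get a freshly allocated conversion, while a
// non-nil argument is presumably reused to avoid allocations. Sketch
// (hypothetical h and h2):
//
//	fh := h.ToFloat(nil) // allocate a new FloatHistogram
//	fh = h2.ToFloat(fh)  // reuse fh for the next conversion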
@@ -4182,8 +4180,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { for _, h := range tsdbutil.GenerateTestHistograms(2 * numHistograms)[numHistograms:] { var err error if floatHistogram { - _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat()) - expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat()}) + _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat(nil)) + expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)}) } else { _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h, nil) expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h}) @@ -4208,8 +4206,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { s = head.series.getByHash(l.Hash(), l) require.NotNil(t, s) require.NotNil(t, s.headChunks) - require.Equal(t, s.headChunks.len(), 1) - require.Equal(t, 1, len(s.mmappedChunks)) + require.Equal(t, 1, s.headChunks.len()) + require.Len(t, s.mmappedChunks, 1) testQuery(2) } @@ -4229,7 +4227,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { app := head.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, l, ts, h.Copy(), nil) } @@ -4549,7 +4547,7 @@ func TestChunkSnapshotReplayBug(t *testing.T) { labels.MustNewMatcher(labels.MatchEqual, "__name__", "request_duration"), labels.MustNewMatcher(labels.MatchNotEqual, "status_code", "200"), ) - require.Len(t, series, 0, "there should be no series found") + require.Empty(t, series, "there should be no series found") } func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { @@ -4583,7 +4581,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { // Verify the snapshot. name, idx, offset, err := LastChunkSnapshot(dir) require.NoError(t, err) - require.True(t, name != "") + require.NotEqual(t, "", name) require.Equal(t, 0, idx) require.Greater(t, offset, 0) } @@ -4942,7 +4940,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { } files, err := os.ReadDir(filepath.Join(dir, "chunks_head")) - require.Equal(t, 5, len(files)) + require.Len(t, files, 5) // Corrupt a m-map file. mmapFilePath := filepath.Join(dir, "chunks_head", "000002") @@ -4957,7 +4955,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { // There should be less m-map files due to corruption. files, err = os.ReadDir(filepath.Join(dir, "chunks_head")) - require.Equal(t, 2, len(files)) + require.Len(t, files, 2) // Querying should not panic. q, err := NewBlockQuerier(h, 0, lastTs) @@ -5776,3 +5774,116 @@ func TestSecondaryHashFunction(t *testing.T) { }) } } + +func TestPostingsCardinalityStats(t *testing.T) { + head := &Head{postings: index.NewMemPostings()} + head.postings.Add(1, labels.FromStrings(labels.MetricName, "t", "n", "v1")) + head.postings.Add(2, labels.FromStrings(labels.MetricName, "t", "n", "v2")) + + statsForMetricName := head.PostingsCardinalityStats(labels.MetricName, 10) + head.postings.Add(3, labels.FromStrings(labels.MetricName, "t", "n", "v3")) + // Using cache. + require.Equal(t, statsForMetricName, head.PostingsCardinalityStats(labels.MetricName, 10)) + + statsForSomeLabel := head.PostingsCardinalityStats("n", 10) + // Cache should be evicted because of the change of label name. 
+ require.NotEqual(t, statsForMetricName, statsForSomeLabel) + head.postings.Add(4, labels.FromStrings(labels.MetricName, "t", "n", "v4")) + // Using cache. + require.Equal(t, statsForSomeLabel, head.PostingsCardinalityStats("n", 10)) + // Cache should be evicted because of the change of limit parameter. + statsForSomeLabel1 := head.PostingsCardinalityStats("n", 1) + require.NotEqual(t, statsForSomeLabel1, statsForSomeLabel) + // Using cache. + require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1)) +} + +func TestHeadAppender_AppendCTZeroSample(t *testing.T) { + type appendableSamples struct { + ts int64 + val float64 + ct int64 + } + for _, tc := range []struct { + name string + appendableSamples []appendableSamples + expectedSamples []model.Sample + }{ + { + name: "In order ct+normal sample", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + }, + }, + { + name: "Consecutive appends with same ct ignore ct", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + {ts: 101, val: 10, ct: 1}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + {Timestamp: 101, Value: 10}, + }, + }, + { + name: "Consecutive appends with newer ct do not ignore ct", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + {ts: 102, val: 10, ct: 101}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + {Timestamp: 101, Value: 0}, + {Timestamp: 102, Value: 10}, + }, + }, + { + name: "CT equals to previous sample timestamp is ignored", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + {ts: 101, val: 10, ct: 100}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + {Timestamp: 101, Value: 10}, + }, + }, + } { + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + a := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + for _, sample := range tc.appendableSamples { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.val) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + + q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) + it := s.Iterator(nil) + for _, sample := range tc.expectedSamples { + require.Equal(t, chunkenc.ValFloat, it.Next()) + timestamp, value := it.At() + require.Equal(t, sample.Timestamp, model.Time(timestamp)) + require.Equal(t, sample.Value, model.SampleValue(value)) + } + require.Equal(t, chunkenc.ValNone, it.Next()) + } +} diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 259298adb7..0120903a97 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -14,6 +14,7 @@ package tsdb import ( + "errors" "fmt" "math" "os" @@ -24,7 +25,6 @@ import ( "time" "github.com/go-kit/log/level" - "github.com/pkg/errors" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +128,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. 
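TestHeadAppender_AppendCTZeroSample above pins down the intended created-timestamp (CT) semantics: when a series' CT falls strictly between the previous sample and the new one, a synthetic zero-value sample is written at the CT, while an unchanged CT, or one colliding with an existing timestamp, is ignored. A sketch of the ingestion-side calling pattern, using only the two appender methods the test exercises (ctAppender is a local interface introduced for the sketch; the head appender in the test satisfies it):

package sketch

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// ctAppender is the minimal surface this sketch needs.
type ctAppender interface {
	Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error)
	AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error)
}

// appendWithCT injects the zero sample at the created timestamp before
// appending the real sample, mirroring the loop in the test above.
// Whether a rejected CT should be fatal is the caller's choice; the
// test requires success for all of its cases.
func appendWithCT(app ctAppender, lset labels.Labels, ts, ct int64, v float64) error {
	if _, err := app.AppendCTZeroSample(0, lset, ts, ct); err != nil {
		return err
	}
	_, err := app.Append(0, lset, ts, v)
	return err
}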
// At the moment the only possible error here is out of order exemplars, which we shouldn't see when // replaying the WAL, so lets just log the error if it's not that type. err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) - if err != nil && err == storage.ErrOutOfOrderExemplar { + if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) } } @@ -145,7 +145,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. series, err = dec.Series(rec, series) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode series"), + Err: fmt.Errorf("decode series: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -157,7 +157,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -169,7 +169,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. tstones, err = dec.Tombstones(rec, tstones) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode tombstones"), + Err: fmt.Errorf("decode tombstones: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -181,7 +181,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode exemplars"), + Err: fmt.Errorf("decode exemplars: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -193,7 +193,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode histograms"), + Err: fmt.Errorf("decode histograms: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -205,7 +205,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode float histograms"), + Err: fmt.Errorf("decode float histograms: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -217,7 +217,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. meta, err := dec.Metadata(rec, meta) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode metadata"), + Err: fmt.Errorf("decode metadata: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -388,7 +388,7 @@ Outer: continue } s.meta = &metadata.Metadata{ - Type: record.ToTextparseMetricType(m.Type), + Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help, } @@ -416,8 +416,8 @@ Outer: close(exemplarsInput) wg.Wait() - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return fmt.Errorf("read records: %w", err) } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { @@ -710,7 +710,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. 
samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -722,7 +722,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode mmap markers"), + Err: fmt.Errorf("decode mmap markers: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -808,8 +808,8 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. } wg.Wait() - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return fmt.Errorf("read records: %w", err) } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { @@ -997,7 +997,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh chk, err := chunkenc.FromData(enc, chunkBytesCopy) if err != nil { - return csr, errors.Wrap(err, "chunk from data") + return csr, fmt.Errorf("chunk from data: %w", err) } csr.mc.chunk = chk @@ -1032,7 +1032,7 @@ func encodeTombstonesToSnapshotRecord(tr tombstones.Reader) ([]byte, error) { buf.PutByte(chunkSnapshotRecordTypeTombstones) b, err := tombstones.Encode(tr) if err != nil { - return nil, errors.Wrap(err, "encode tombstones") + return nil, fmt.Errorf("encode tombstones: %w", err) } buf.PutUvarintBytes(b) @@ -1047,7 +1047,10 @@ func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) { } tr, err := tombstones.Decode(dec.UvarintBytes()) - return tr, errors.Wrap(err, "decode tombstones") + if err != nil { + return tr, fmt.Errorf("decode tombstones: %w", err) + } + return tr, nil } const chunkSnapshotPrefix = "chunk_snapshot." @@ -1074,13 +1077,13 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { stats := &ChunkSnapshotStats{} wlast, woffset, err := h.wal.LastSegmentAndOffset() - if err != nil && err != record.ErrNotFound { - return stats, errors.Wrap(err, "get last wal segment and offset") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return stats, fmt.Errorf("get last wal segment and offset: %w", err) } _, cslast, csoffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) - if err != nil && err != record.ErrNotFound { - return stats, errors.Wrap(err, "find last chunk snapshot") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return stats, fmt.Errorf("find last chunk snapshot: %w", err) } if wlast == cslast && woffset == csoffset { @@ -1095,11 +1098,11 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { stats.Dir = cpdir if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { - return stats, errors.Wrap(err, "create chunk snapshot dir") + return stats, fmt.Errorf("create chunk snapshot dir: %w", err) } cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType()) if err != nil { - return stats, errors.Wrap(err, "open chunk snapshot") + return stats, fmt.Errorf("open chunk snapshot: %w", err) } // Ensures that an early return caused by an error doesn't leave any tmp files. 
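The dominant change in head_wal.go is mechanical but worth spelling out: github.com/pkg/errors is replaced by the standard library, so errors.Wrap(err, "msg") becomes fmt.Errorf("msg: %w", err), and sentinel comparisons like err == record.ErrNotFound become errors.Is(err, record.ErrNotFound), which also matches errors hidden behind wrapping. A self-contained illustration of the equivalence:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func find() error { return errNotFound }

func main() {
	// %w keeps the wrapped error inspectable, as errors.Wrap did.
	err := fmt.Errorf("find last chunk snapshot: %w", find())

	fmt.Println(err)                         // find last chunk snapshot: not found
	fmt.Println(err == errNotFound)          // false: == misses the wrapping
	fmt.Println(errors.Is(err, errNotFound)) // true: Is unwraps the chain
}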
@@ -1128,7 +1131,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if len(buf) > 10*1024*1024 { if err := cp.Log(recs...); err != nil { h.series.locks[i].RUnlock() - return stats, errors.Wrap(err, "flush records") + return stats, fmt.Errorf("flush records: %w", err) } buf, recs = buf[:0], recs[:0] } @@ -1141,16 +1144,16 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Add tombstones to the snapshot. tombstonesReader, err := h.Tombstones() if err != nil { - return stats, errors.Wrap(err, "get tombstones") + return stats, fmt.Errorf("get tombstones: %w", err) } rec, err := encodeTombstonesToSnapshotRecord(tombstonesReader) if err != nil { - return stats, errors.Wrap(err, "encode tombstones") + return stats, fmt.Errorf("encode tombstones: %w", err) } recs = append(recs, rec) // Flush remaining series records and tombstones. if err := cp.Log(recs...); err != nil { - return stats, errors.Wrap(err, "flush records") + return stats, fmt.Errorf("flush records: %w", err) } buf = buf[:0] @@ -1169,7 +1172,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { encbuf.PutByte(chunkSnapshotRecordTypeExemplars) enc.EncodeExemplarsIntoBuffer(batch, &encbuf) if err := cp.Log(encbuf.Get()); err != nil { - return errors.Wrap(err, "log exemplars") + return fmt.Errorf("log exemplars: %w", err) } buf, batch = buf[:0], batch[:0] return nil @@ -1177,7 +1180,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { err = h.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error { if len(batch) >= maxExemplarsPerRecord { if err := flushExemplars(); err != nil { - return errors.Wrap(err, "flush exemplars") + return fmt.Errorf("flush exemplars: %w", err) } } @@ -1195,19 +1198,19 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { return nil }) if err != nil { - return stats, errors.Wrap(err, "iterate exemplars") + return stats, fmt.Errorf("iterate exemplars: %w", err) } // Flush remaining exemplars. if err := flushExemplars(); err != nil { - return stats, errors.Wrap(err, "flush exemplars at the end") + return stats, fmt.Errorf("flush exemplars at the end: %w", err) } if err := cp.Close(); err != nil { - return stats, errors.Wrap(err, "close chunk snapshot") + return stats, fmt.Errorf("close chunk snapshot: %w", err) } if err := fileutil.Replace(cpdirtmp, cpdir); err != nil { - return stats, errors.Wrap(err, "rename chunk snapshot directory") + return stats, fmt.Errorf("rename chunk snapshot directory: %w", err) } if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, wlast, woffset); err != nil { @@ -1231,7 +1234,10 @@ func (h *Head) performChunkSnapshot() error { if err == nil { level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } - return errors.Wrap(err, "chunk snapshot") + if err != nil { + return fmt.Errorf("chunk snapshot: %w", err) + } + return nil } // ChunkSnapshotStats returns stats about a created chunk snapshot. 
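One behavioural difference drives the extra nil checks added around performChunkSnapshot above and the multi-error collection in loadChunkSnapshot further down: errors.Wrap returned nil when given a nil error, while fmt.Errorf with %w always returns a non-nil error. The pitfall, runnable:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var cause error // nil on the happy path

	// Unlike errors.Wrap(nil, ...), fmt.Errorf manufactures a real error
	// from a nil cause, which is why the converted call sites guard with
	// an explicit `if err != nil` before wrapping.
	wrapped := fmt.Errorf("chunk snapshot: %w", cause)
	fmt.Println(wrapped == nil)         // false, even though cause was nil
	fmt.Println(errors.Unwrap(wrapped)) // <nil>: nothing was actually wrapped
}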
@@ -1329,16 +1335,16 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error { func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSeries, error) { dir, snapIdx, snapOffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil { - if err == record.ErrNotFound { + if errors.Is(err, record.ErrNotFound) { return snapIdx, snapOffset, nil, nil } - return snapIdx, snapOffset, nil, errors.Wrap(err, "find last chunk snapshot") + return snapIdx, snapOffset, nil, fmt.Errorf("find last chunk snapshot: %w", err) } start := time.Now() sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return snapIdx, snapOffset, nil, errors.Wrap(err, "open chunk snapshot") + return snapIdx, snapOffset, nil, fmt.Errorf("open chunk snapshot: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -1426,7 +1432,7 @@ Outer: numSeries++ csr, err := decodeSeriesFromChunkSnapshot(&dec, rec) if err != nil { - loopErr = errors.Wrap(err, "decode series record") + loopErr = fmt.Errorf("decode series record: %w", err) break Outer } recordChan <- csr @@ -1434,7 +1440,7 @@ Outer: case chunkSnapshotRecordTypeTombstones: tr, err := decodeTombstonesSnapshotRecord(rec) if err != nil { - loopErr = errors.Wrap(err, "decode tombstones") + loopErr = fmt.Errorf("decode tombstones: %w", err) break Outer } @@ -1442,7 +1448,7 @@ Outer: h.tombstones.AddInterval(ref, ivs...) return nil }); err != nil { - loopErr = errors.Wrap(err, "iterate tombstones") + loopErr = fmt.Errorf("iterate tombstones: %w", err) break Outer } @@ -1470,7 +1476,7 @@ Outer: exemplarBuf = exemplarBuf[:0] exemplarBuf, err = dec.ExemplarsFromBuffer(&decbuf, exemplarBuf) if err != nil { - loopErr = errors.Wrap(err, "exemplars from buffer") + loopErr = fmt.Errorf("exemplars from buffer: %w", err) break Outer } @@ -1486,7 +1492,7 @@ Outer: Value: e.V, Ts: e.T, }); err != nil { - loopErr = errors.Wrap(err, "add exemplar") + loopErr = fmt.Errorf("add exemplar: %w", err) break Outer } } @@ -1504,16 +1510,19 @@ Outer: } close(errChan) - merr := tsdb_errors.NewMulti(errors.Wrap(loopErr, "decode loop")) + merr := tsdb_errors.NewMulti() + if loopErr != nil { + merr.Add(fmt.Errorf("decode loop: %w", loopErr)) + } for err := range errChan { - merr.Add(errors.Wrap(err, "record processing")) + merr.Add(fmt.Errorf("record processing: %w", err)) } if err := merr.Err(); err != nil { return -1, -1, nil, err } - if r.Err() != nil { - return -1, -1, nil, errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return -1, -1, nil, fmt.Errorf("read records: %w", err) } if len(refSeries) == 0 { diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 84f8bb9660..09812ddf48 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -426,7 +426,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... // We add padding to 16 bytes to increase the addressable space we get through 4 byte // series references. if err := w.addPadding(16); err != nil { - return fmt.Errorf("failed to write padding bytes: %v", err) + return fmt.Errorf("failed to write padding bytes: %w", err) } if w.f.pos%16 != 0 { @@ -443,7 +443,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... 
if !ok { nameIndex, err = w.symbols.ReverseLookup(l.Name) if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + return fmt.Errorf("symbol entry for %q does not exist, %w", l.Name, err) } } w.labelNames[l.Name]++ @@ -453,7 +453,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok || cacheEntry.lastValue != l.Value { valueIndex, err = w.symbols.ReverseLookup(l.Value) if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + return fmt.Errorf("symbol entry for %q does not exist, %w", l.Value, err) } w.symbolCache[l.Name] = symbolCacheEntry{ index: nameIndex, diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 6553769249..9010a7b3a5 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -205,7 +205,7 @@ func TestIndexRW_Postings(t *testing.T) { err := ir.Series(p.At(), &builder, &c) require.NoError(t, err) - require.Equal(t, 0, len(c)) + require.Empty(t, c) require.Equal(t, series[i], builder.Labels()) } require.NoError(t, p.Err()) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 0e55a6db76..e27ebf6e06 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -18,11 +18,13 @@ import ( "context" "encoding/binary" "fmt" + "math" "runtime" "sort" "strings" "sync" + "github.com/bboreham/go-loser" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -525,7 +527,7 @@ func (it *intersectPostings) Err() error { } // Merge returns a new iterator over the union of the input iterators. -func Merge(ctx context.Context, its ...Postings) Postings { +func Merge(_ context.Context, its ...Postings) Postings { if len(its) == 0 { return EmptyPostings() } @@ -533,122 +535,48 @@ func Merge(ctx context.Context, its ...Postings) Postings { return its[0] } - p, ok := newMergedPostings(ctx, its) + p, ok := newMergedPostings(its) if !ok { return EmptyPostings() } return p } -type postingsHeap []Postings - -func (h postingsHeap) Len() int { return len(h) } -func (h postingsHeap) Less(i, j int) bool { return h[i].At() < h[j].At() } -func (h *postingsHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] } - -func (h *postingsHeap) Push(x interface{}) { - *h = append(*h, x.(Postings)) -} - -func (h *postingsHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x -} - type mergedPostings struct { - h postingsHeap - initialized bool - cur storage.SeriesRef - err error + p []Postings + h *loser.Tree[storage.SeriesRef, Postings] + cur storage.SeriesRef } -func newMergedPostings(ctx context.Context, p []Postings) (m *mergedPostings, nonEmpty bool) { - ph := make(postingsHeap, 0, len(p)) - - for _, it := range p { - // NOTE: mergedPostings struct requires the user to issue an initial Next. - switch { - case ctx.Err() != nil: - return &mergedPostings{err: ctx.Err()}, true - case it.Next(): - ph = append(ph, it) - case it.Err() != nil: - return &mergedPostings{err: it.Err()}, true - } - } - - if len(ph) == 0 { - return nil, false - } - return &mergedPostings{h: ph}, true +func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { + const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree. + lt := loser.New(p, maxVal) + return &mergedPostings{p: p, h: lt}, true } func (it *mergedPostings) Next() bool { - if it.h.Len() == 0 || it.err != nil { - return false - } - - // The user must issue an initial Next. 
- if !it.initialized { - heap.Init(&it.h) - it.cur = it.h[0].At() - it.initialized = true - return true - } - for { - cur := it.h[0] - if !cur.Next() { - heap.Pop(&it.h) - if cur.Err() != nil { - it.err = cur.Err() - return false - } - if it.h.Len() == 0 { - return false - } - } else { - // Value of top of heap has changed, re-heapify. - heap.Fix(&it.h, 0) + if !it.h.Next() { + return false } - - if it.h[0].At() != it.cur { - it.cur = it.h[0].At() + // Remove duplicate entries. + newItem := it.h.At() + if newItem != it.cur { + it.cur = newItem return true } } } func (it *mergedPostings) Seek(id storage.SeriesRef) bool { - if it.h.Len() == 0 || it.err != nil { + for !it.h.IsEmpty() && it.h.At() < id { + finished := !it.h.Winner().Seek(id) + it.h.Fix(finished) + } + if it.h.IsEmpty() { return false } - if !it.initialized { - if !it.Next() { - return false - } - } - for it.cur < id { - cur := it.h[0] - if !cur.Seek(id) { - heap.Pop(&it.h) - if cur.Err() != nil { - it.err = cur.Err() - return false - } - if it.h.Len() == 0 { - return false - } - } else { - // Value of top of heap has changed, re-heapify. - heap.Fix(&it.h, 0) - } - - it.cur = it.h[0].At() - } + it.cur = it.h.At() return true } @@ -657,7 +585,12 @@ func (it mergedPostings) At() storage.SeriesRef { } func (it mergedPostings) Err() error { - return it.err + for _, p := range it.p { + if err := p.Err(); err != nil { + return err + } + } + return nil } // Without returns a new postings list that contains all elements from the full list that diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index bd6406eb9f..95f3aa2c1e 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -380,6 +380,38 @@ func BenchmarkIntersect(t *testing.B) { }) } +func BenchmarkMerge(t *testing.B) { + var lps []*ListPostings + var refs [][]storage.SeriesRef + + // Create 100000 matchers(k=100000), making sure all memory allocation is done before starting the loop. + for i := 0; i < 100000; i++ { + var temp []storage.SeriesRef + for j := 1; j < 100; j++ { + temp = append(temp, storage.SeriesRef(i+j*100000)) + } + lps = append(lps, newListPostings(temp...)) + refs = append(refs, temp) + } + + its := make([]Postings, len(refs)) + for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} { + t.Run(fmt.Sprint(nSeries), func(bench *testing.B) { + ctx := context.Background() + for i := 0; i < bench.N; i++ { + // Reset the ListPostings to their original values each time round the loop. + for j := range refs[:nSeries] { + lps[j].list = refs[j] + its[j] = lps[j] + } + if err := consumePostings(Merge(ctx, its[:nSeries]...)); err != nil { + bench.Fatal(err) + } + } + }) + } +} + func TestMultiMerge(t *testing.T) { i1 := newListPostings(1, 2, 3, 4, 5, 6, 1000, 1001) i2 := newListPostings(2, 4, 5, 6, 7, 8, 999, 1001) @@ -481,7 +513,7 @@ func TestMergedPostings(t *testing.T) { m := Merge(ctx, c.in...) 
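newMergedPostings now hands the k-way merge to github.com/bboreham/go-loser, a generic loser tree (tournament tree) replacing the container/heap code deleted above: advancing the winner costs one root-to-leaf path of comparisons, and the maxVal sentinel marks exhausted iterators, which is why it must sort above every real SeriesRef. Note the raw tree yields duplicates; mergedPostings.Next deduplicates by comparing against it.cur. A toy, self-contained use of the same calls (loser.New, Next, At), assuming the tree needs only At and Next from its sequences, as the usage above suggests:

package main

import (
	"fmt"

	"github.com/bboreham/go-loser"
)

// intSeq adapts a sorted slice to the sequence shape the loser tree
// consumes: Next advances, At reports the current value.
type intSeq struct {
	vals []int
	cur  int
}

func newIntSeq(vals ...int) *intSeq { return &intSeq{vals: vals, cur: -1} }

func (s *intSeq) At() int { return s.vals[s.cur] }

func (s *intSeq) Next() bool {
	s.cur++
	return s.cur < len(s.vals)
}

func main() {
	lists := []*intSeq{
		newIntSeq(1, 4, 7),
		newIntSeq(2, 4, 8),
		newIntSeq(3, 5, 9),
	}
	// maxInt plays the role storage.SeriesRef(math.MaxUint64) plays above:
	// a sentinel greater than any real value, marking finished sequences.
	const maxInt = int(^uint(0) >> 1)
	tree := loser.New(lists, maxInt)
	for tree.Next() {
		fmt.Print(tree.At(), " ") // 1 2 3 4 4 5 7 8 9 (duplicates kept)
	}
	fmt.Println()
}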
if c.res == EmptyPostings() { - require.Equal(t, EmptyPostings(), m) + require.False(t, m.Next()) return } @@ -977,7 +1009,7 @@ func TestMemPostings_Delete(t *testing.T) { deleted := p.Get("lbl1", "b") expanded, err = ExpandPostings(deleted) require.NoError(t, err) - require.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded) + require.Empty(t, expanded, "expected empty postings, got %v", expanded) } func TestPostingsCloner(t *testing.T) { diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 7ce51c795f..674e1c0524 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -33,7 +33,7 @@ func TestPostingsStats(t *testing.T) { stats.push(Stat{Name: "Stuff", Count: 3000000}) data := stats.get() - require.Equal(t, 10, len(data)) + require.Len(t, data, 10) for i := 0; i < heapLength; i++ { require.Equal(t, uint64(max-i), data[i].Count) } @@ -51,7 +51,7 @@ func TestPostingsStats2(t *testing.T) { data := stats.get() - require.Equal(t, 4, len(data)) + require.Len(t, data, 4) require.Equal(t, uint64(11), data[0].Count) } diff --git a/tsdb/mocks_test.go b/tsdb/mocks_test.go index 268017caa6..d7c2b0a4f3 100644 --- a/tsdb/mocks_test.go +++ b/tsdb/mocks_test.go @@ -14,7 +14,7 @@ package tsdb import ( - "github.com/pkg/errors" + "fmt" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -41,7 +41,7 @@ func (m *mockIndexWriter) AddSeries(_ storage.SeriesRef, l labels.Labels, chks . for i, chk := range chks { c, err := copyChunk(chk.Chunk) if err != nil { - return errors.Wrap(err, "mockIndexWriter: copy chunk") + return fmt.Errorf("mockIndexWriter: copy chunk: %w", err) } chksNew[i] = chunks.Meta{MaxTime: chk.MaxTime, MinTime: chk.MinTime, Chunk: c} } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 64577872f5..b50a268b71 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -491,7 +491,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { }) require.Nil(t, iterable) require.Equal(t, err, fmt.Errorf("not found")) - require.Equal(t, c, nil) + require.Nil(t, c) }) tests := []struct { diff --git a/tsdb/querier.go b/tsdb/querier.go index 95fc0c410e..b7701dde2b 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -47,18 +47,18 @@ type blockBaseQuerier struct { func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, error) { indexr, err := b.Index() if err != nil { - return nil, errors.Wrap(err, "open index reader") + return nil, fmt.Errorf("open index reader: %w", err) } chunkr, err := b.Chunks() if err != nil { indexr.Close() - return nil, errors.Wrap(err, "open chunk reader") + return nil, fmt.Errorf("open chunk reader: %w", err) } tombsr, err := b.Tombstones() if err != nil { indexr.Close() chunkr.Close() - return nil, errors.Wrap(err, "open tombstone reader") + return nil, fmt.Errorf("open tombstone reader: %w", err) } if tombsr == nil { @@ -386,12 +386,12 @@ const maxExpandedPostingsFactor = 100 // Division factor for maximum number of m func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { p, err := r.PostingsForMatchers(ctx, false, matchers...) 
if err != nil { - return nil, errors.Wrap(err, "fetching postings for matchers") + return nil, fmt.Errorf("fetching postings for matchers: %w", err) } allValues, err := r.LabelValues(ctx, name) if err != nil { - return nil, errors.Wrapf(err, "fetching values of label %s", name) + return nil, fmt.Errorf("fetching values of label %s: %w", name, err) } // If we have a matcher for the label name, we can filter out values that don't match @@ -429,7 +429,7 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma if len(expanded) <= maxExpandedPostings { // When we're here, p.Next() must have returned false, so we need to check for errors. if err := p.Err(); err != nil { - return nil, errors.Wrap(err, "expanding postings for matchers") + return nil, fmt.Errorf("expanding postings for matchers: %w", err) } // We have expanded all the postings -- all returned label values will be from these series only. @@ -445,12 +445,12 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma for i, value := range allValues { valuesPostings[i], err = r.Postings(ctx, name, value) if err != nil { - return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value) + return nil, fmt.Errorf("fetching postings for %s=%q: %w", name, value, err) } } indexes, err := index.FindIntersectingPostings(p, valuesPostings) if err != nil { - return nil, errors.Wrap(err, "intersecting postings") + return nil, fmt.Errorf("intersecting postings: %w", err) } values := make([]string, 0, len(indexes)) @@ -474,7 +474,7 @@ func labelValuesFromSeries(r IndexReader, labelName string, refs []storage.Serie continue } if err != nil { - return nil, errors.Wrapf(err, "label values for label %s", labelName) + return nil, fmt.Errorf("label values for label %s: %w", labelName, err) } v := builder.Labels().Get(labelName) @@ -552,8 +552,8 @@ func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*lab for p.Next() { postings = append(postings, p.At()) } - if p.Err() != nil { - return nil, errors.Wrapf(p.Err(), "postings for label names with matchers") + if err := p.Err(); err != nil { + return nil, fmt.Errorf("postings for label names with matchers: %w", err) } return r.LabelNamesFor(ctx, postings...) @@ -592,10 +592,10 @@ func (b *blockBaseSeriesSet) Next() bool { for b.p.Next() { if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil { // Postings may be stale. Skip if no underlying series exists. - if errors.Cause(err) == storage.ErrNotFound { + if errors.Is(err, storage.ErrNotFound) { continue } - b.err = errors.Wrapf(err, "get series %d", b.p.At()) + b.err = fmt.Errorf("get series %d: %w", b.p.At(), err) return false } @@ -605,7 +605,7 @@ func (b *blockBaseSeriesSet) Next() bool { intervals, err := b.tombstones.Get(b.p.At()) if err != nil { - b.err = errors.Wrap(err, "get tombstones") + b.err = fmt.Errorf("get tombstones: %w", err) return false } @@ -755,7 +755,7 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { } if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currMeta.Ref, p.blockID.String()) + p.err = fmt.Errorf("cannot populate chunk %d from block %s: %w", p.currMeta.Ref, p.blockID.String(), p.err) return false } @@ -923,24 +923,32 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { } // Move to the next chunk/deletion iterator. 
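The rewritten populateWithDelChunkSeriesIterator.Next below fixes the bug exercised by the new "two chunks with first chunk deleted" case and by TestQueryWithOneChunkCompletelyDeleted further down: a tombstone interval can delete every sample in a chunk, in which case populateCurrForSingleChunk or populateChunksFromIterable legitimately produce nothing, and the old single-shot code reported the whole series as exhausted instead of trying the next meta. The shape of the fix, reduced to a sketch with hypothetical advance/materialize stand-ins:

package sketch

// nextNonEmpty keeps pulling candidates until one actually materializes
// into a chunk, rather than treating an empty result as end-of-series.
// advance stands in for p.next(true); materialize for the two populate
// helpers below, which return false when all samples were deleted.
func nextNonEmpty(advance, materialize func() bool) bool {
	for advance() {
		if materialize() {
			return true
		}
		// Nothing survived deletion in this chunk; try the next one.
	}
	return false
}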
- if !p.next(true) { - return false - } - - if p.currMeta.Chunk != nil { + // This is a for loop as if the current p.currDelIter returns no samples + // (which means a chunk won't be created), there still might be more + // samples/chunks from the rest of p.metas. + for p.next(true) { if p.currDelIter == nil { p.currMetaWithChunk = p.currMeta return true } - // If ChunkOrIterable() returned a non-nil chunk, the samples in - // p.currDelIter will only form one chunk, as the only change - // p.currDelIter might make is deleting some samples. - return p.populateCurrForSingleChunk() - } - // If ChunkOrIterable() returned an iterable, multiple chunks may be - // created from the samples in p.currDelIter. - return p.populateChunksFromIterable() + if p.currMeta.Chunk != nil { + // If ChunkOrIterable() returned a non-nil chunk, the samples in + // p.currDelIter will only form one chunk, as the only change + // p.currDelIter might make is deleting some samples. + if p.populateCurrForSingleChunk() { + return true + } + } else { + // If ChunkOrIterable() returned an iterable, multiple chunks may be + // created from the samples in p.currDelIter. + if p.populateChunksFromIterable() { + return true + } + } + + } + return false } // populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This @@ -949,7 +957,7 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { valueType := p.currDelIter.Next() if valueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) } return false } @@ -1017,11 +1025,11 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { } if err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) return false } if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) return false } @@ -1040,7 +1048,7 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { firstValueType := p.currDelIter.Next() if firstValueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: no samples could be read") + p.err = fmt.Errorf("populateChunksFromIterable: no samples could be read: %w", err) return false } return false @@ -1124,11 +1132,11 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { } if err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: error when writing new chunks") + p.err = fmt.Errorf("populateChunksFromIterable: error when writing new chunks: %w", err) return false } if err = p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: currDelIter error when writing new chunks") + p.err = fmt.Errorf("populateChunksFromIterable: currDelIter error when writing new chunks: %w", err) return false } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 47365aaae6..ecaf0b26aa 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "fmt" "math" "math/rand" @@ -26,7 +27,6 @@ import ( "time" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -213,7 +213,7 @@ 
func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C require.Equal(t, eok, rok) if !eok { - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) break } sexp := c.exp.At() @@ -248,7 +248,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C require.Equal(t, eok, rok) if !eok { - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) break } sexpChks := c.expChks.At() @@ -1078,6 +1078,24 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) { }, expectedMinMaxTimes: []minMaxTimes{{1, 3}, {9, 9}}, }, + { + name: "two chunks with first chunk deleted", + samples: [][]chunks.Sample{ + {sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}}, + {sample{7, 89, nil, nil}, sample{9, 8, nil, nil}}, + }, + intervals: tombstones.Intervals{{Mint: 1, Maxt: 6}}, + + expected: []chunks.Sample{ + sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, + }, + expectedChks: []chunks.Meta{ + assureChunkFromSamples(t, []chunks.Sample{ + sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, + }), + }, + expectedMinMaxTimes: []minMaxTimes{{7, 9}}, + }, // Deletion with seek. { name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk", @@ -2090,7 +2108,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) { i++ } require.NoError(b, ms.Err()) - require.Equal(b, len(lbls), i) + require.Len(b, lbls, i) } }) } @@ -2339,7 +2357,7 @@ func (m mockIndex) Postings(ctx context.Context, name string, values ...string) func (m mockIndex) SortedPostings(p index.Postings) index.Postings { ep, err := index.ExpandPostings(p) if err != nil { - return index.ErrPostings(errors.Wrap(err, "expand postings")) + return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } sort.Slice(ep, func(i, j int) bool { @@ -2567,7 +2585,7 @@ func BenchmarkQuerySeek(b *testing.B) { require.NoError(b, it.Err()) } require.NoError(b, ss.Err()) - require.Equal(b, 0, len(ss.Warnings())) + require.Empty(b, ss.Warnings()) }) } } @@ -2695,7 +2713,7 @@ func BenchmarkSetMatcher(b *testing.B) { for ss.Next() { } require.NoError(b, ss.Err()) - require.Equal(b, 0, len(ss.Warnings())) + require.Empty(b, ss.Warnings()) } }) } @@ -3234,7 +3252,7 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors la actualExpansions++ } require.NoError(b, ss.Err()) - require.Equal(b, 0, len(ss.Warnings())) + require.Empty(b, ss.Warnings()) require.Equal(b, expExpansions, actualExpansions) require.NoError(b, ss.Err()) } @@ -3424,7 +3442,7 @@ func TestBlockBaseSeriesSet(t *testing.T) { i++ } - require.Equal(t, len(tc.expIdxs), i) + require.Len(t, tc.expIdxs, i) require.NoError(t, bcs.Err()) } } @@ -3726,3 +3744,80 @@ func (f *indexReaderCountingPostingsForMatchersCalls) PostingsForMatchers(ctx co f.postingsForMatchersCalls++ return f.IndexReader.PostingsForMatchers(ctx, concurrent, ms...) } + +func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) { + ctx := context.Background() + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + db.EnableNativeHistograms() + appender := db.Appender(context.Background()) + + var ( + err error + seriesRef storage.SeriesRef + ) + lbs := labels.FromStrings("__name__", "test") + + // Create an int histogram chunk with samples between 0 - 20 and 30 - 40. 
+ for i := 0; i < 20; i++ { + h := tsdbutil.GenerateTestHistogram(1) + seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil) + require.NoError(t, err) + } + for i := 30; i < 40; i++ { + h := tsdbutil.GenerateTestHistogram(1) + seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil) + require.NoError(t, err) + } + + // Append some float histograms - float histograms are a different encoding + // type from int histograms so a new chunk is created. + for i := 60; i < 100; i++ { + fh := tsdbutil.GenerateTestFloatHistogram(1) + seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), nil, fh) + require.NoError(t, err) + } + + err = appender.Commit() + require.NoError(t, err) + + matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test") + require.NoError(t, err) + + // Delete all samples from the int histogram chunk. The deletion intervals + // don't cover the entire histogram chunk, but do cover all the samples + // in the chunk. This case was previously not handled properly. + err = db.Delete(ctx, 0, 20, matcher) + require.NoError(t, err) + err = db.Delete(ctx, 30, 40, matcher) + require.NoError(t, err) + + chunkQuerier, err := db.ChunkQuerier(0, 100) + require.NoError(t, err) + + css := chunkQuerier.Select(context.Background(), false, nil, matcher) + + seriesCount := 0 + for css.Next() { + seriesCount++ + series := css.At() + + sampleCount := 0 + it := series.Iterator(nil) + for it.Next() { + chk := it.At() + cit := chk.Chunk.Iterator(nil) + for vt := cit.Next(); vt != chunkenc.ValNone; vt = cit.Next() { + require.Equal(t, chunkenc.ValFloatHistogram, vt, "Only float histograms expected, other sample types should have been deleted.") + sampleCount++ + } + } + require.NoError(t, it.Err()) + require.Equal(t, 40, sampleCount) + } + require.NoError(t, css.Err()) + require.Equal(t, 1, seriesCount) +} diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 42a656dfe8..3931ad05d6 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -20,9 +20,10 @@ import ( "fmt" "math" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -90,45 +91,45 @@ const ( Stateset MetricType = 7 ) -func GetMetricType(t textparse.MetricType) uint8 { +func GetMetricType(t model.MetricType) uint8 { switch t { - case textparse.MetricTypeCounter: + case model.MetricTypeCounter: return uint8(Counter) - case textparse.MetricTypeGauge: + case model.MetricTypeGauge: return uint8(Gauge) - case textparse.MetricTypeHistogram: + case model.MetricTypeHistogram: return uint8(HistogramSample) - case textparse.MetricTypeGaugeHistogram: + case model.MetricTypeGaugeHistogram: return uint8(GaugeHistogram) - case textparse.MetricTypeSummary: + case model.MetricTypeSummary: return uint8(Summary) - case textparse.MetricTypeInfo: + case model.MetricTypeInfo: return uint8(Info) - case textparse.MetricTypeStateset: + case model.MetricTypeStateset: return uint8(Stateset) default: return uint8(UnknownMT) } } -func ToTextparseMetricType(m uint8) textparse.MetricType { +func ToMetricType(m uint8) model.MetricType { switch m { case uint8(Counter): - return textparse.MetricTypeCounter + return model.MetricTypeCounter case uint8(Gauge): - return textparse.MetricTypeGauge + return
model.MetricTypeGauge case uint8(HistogramSample): - return textparse.MetricTypeHistogram + return model.MetricTypeHistogram case uint8(GaugeHistogram): - return textparse.MetricTypeGaugeHistogram + return model.MetricTypeGaugeHistogram case uint8(Summary): - return textparse.MetricTypeSummary + return model.MetricTypeSummary case uint8(Info): - return textparse.MetricTypeInfo + return model.MetricTypeInfo case uint8(Stateset): - return textparse.MetricTypeStateset + return model.MetricTypeStateset default: - return textparse.MetricTypeUnknown + return model.MetricTypeUnknown } } diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 9111350a73..57599ef6d5 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -15,7 +15,6 @@ package record import ( - "errors" "math/rand" "testing" @@ -159,7 +158,7 @@ func TestRecord_EncodeDecode(t *testing.T) { floatHistograms[i] = RefFloatHistogramSample{ Ref: h.Ref, T: h.T, - FH: h.H.ToFloat(), + FH: h.H.ToFloat(nil), } } decFloatHistograms, err := dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil) @@ -209,7 +208,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Samples(samples, nil)[:8] _, err := dec.Samples(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) t.Run("Test corrupted tombstone record", func(t *testing.T) { @@ -232,7 +231,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Exemplars(exemplars, nil)[:8] _, err := dec.Exemplars(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) t.Run("Test corrupted metadata record", func(t *testing.T) { @@ -242,7 +241,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Metadata(meta, nil)[:8] _, err := dec.Metadata(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) t.Run("Test corrupted histogram record", func(t *testing.T) { @@ -267,7 +266,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.HistogramSamples(histograms, nil)[:8] _, err := dec.HistogramSamples(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) } diff --git a/tsdb/repair.go b/tsdb/repair.go index 0811164541..9d2c5738d1 100644 --- a/tsdb/repair.go +++ b/tsdb/repair.go @@ -22,7 +22,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -35,7 +34,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { // We must actually set the index file version to 2 and revert the meta.json version back to 1. 
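record.go's metadata mapping above moves from textparse.MetricType to model.MetricType from prometheus/common, so WAL metadata records and the API layer share one type; GetMetricType and the renamed ToMetricType remain inverses over the known types, with anything unrecognized collapsing to unknown in both directions. A small round trip through the two functions as defined above:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	for _, mt := range []model.MetricType{
		model.MetricTypeCounter,
		model.MetricTypeGauge,
		model.MetricTypeSummary,
	} {
		// Encode to the compact uint8 stored in metadata records, then
		// decode back; known types survive the round trip unchanged.
		enc := record.GetMetricType(mt)
		fmt.Println(mt, "->", enc, "->", record.ToMetricType(enc))
	}
}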
dirs, err := blockDirs(dir) if err != nil { - return errors.Wrapf(err, "list block dirs in %q", dir) + return fmt.Errorf("list block dirs in %q: %w", dir, err) } tmpFiles := make([]string, 0, len(dirs)) @@ -71,44 +70,54 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { repl, err := os.Create(filepath.Join(d, "index.repaired")) if err != nil { - return errors.Wrapf(err, "create index.repaired for block dir: %v", d) + return fmt.Errorf("create index.repaired for block dir: %v: %w", d, err) } tmpFiles = append(tmpFiles, repl.Name()) broken, err := os.Open(filepath.Join(d, indexFilename)) if err != nil { - return errors.Wrapf(err, "open broken index for block dir: %v", d) + return fmt.Errorf("open broken index for block dir: %v: %w", d, err) } if _, err := io.Copy(repl, broken); err != nil { - return errors.Wrapf(err, "copy content of index to index.repaired for block dir: %v", d) + return fmt.Errorf("copy content of index to index.repaired for block dir: %v: %w", d, err) } // Set the 5th byte to 2 to indicate the correct file format version. if _, err := repl.WriteAt([]byte{2}, 4); err != nil { - return tsdb_errors.NewMulti( - errors.Wrapf(err, "rewrite of index.repaired for block dir: %v", d), - errors.Wrap(repl.Close(), "close"), - ).Err() + errs := tsdb_errors.NewMulti( + fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err)) + if err := repl.Close(); err != nil { + errs.Add(fmt.Errorf("close: %w", err)) + } + return errs.Err() } if err := repl.Sync(); err != nil { - return tsdb_errors.NewMulti( - errors.Wrapf(err, "sync of index.repaired for block dir: %v", d), - errors.Wrap(repl.Close(), "close"), - ).Err() + errs := tsdb_errors.NewMulti( + fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err)) + if err := repl.Close(); err != nil { + errs.Add(fmt.Errorf("close: %w", err)) + } + return errs.Err() } if err := repl.Close(); err != nil { - return errors.Wrapf(repl.Close(), "close repaired index for block dir: %v", d) + return fmt.Errorf("close repaired index for block dir: %v: %w", d, err) } if err := broken.Close(); err != nil { - return errors.Wrapf(repl.Close(), "close broken index for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("close broken index for block dir: %v: %w", d, err) + } } if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil { - return errors.Wrapf(repl.Close(), "replaced broken index with index.repaired for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("replaced broken index with index.repaired for block dir: %v: %w", d, err) + } } // Reset version of meta.json to 1. meta.Version = metaVersion1 if _, err := writeMetaFile(logger, d, meta); err != nil { - return errors.Wrapf(repl.Close(), "write meta for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("write meta for block dir: %v: %w", d, err) + } } } return nil diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 0847f81a8a..bb8d49b202 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -30,6 +30,14 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { return r } +func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram { + hs := GenerateTestHistograms(n) + for i := range hs { + hs[i].CounterResetHint = histogram.UnknownCounterReset + } + return hs +} + // GenerateTestHistogram but it is up to the user to set any known counter reset hint. 
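Beyond the mechanical conversion, the repair.go hunk above removes a latent bug: expressions like errors.Wrapf(repl.Close(), "close broken index ...") wrapped the result of a second Close call rather than the failure actually being handled, so a successful Close silently swallowed the original error. Distilled, with a hypothetical doWork standing in for the copy/sync steps (this sketch also preserves the primary error, which some of the converted branches above still drop):

package sketch

import (
	"fmt"
	"os"
)

// doWork is a hypothetical stand-in for the copy/sync/write steps.
func doWork(f *os.File) error { return nil }

// workAndClose handles each error where it occurs instead of wrapping
// the return value of an unrelated Close call.
func workAndClose(f *os.File) error {
	if err := doWork(f); err != nil {
		if cerr := f.Close(); cerr != nil {
			return fmt.Errorf("close %s after failure: %w", f.Name(), cerr)
		}
		return fmt.Errorf("work on %s: %w", f.Name(), err)
	}
	return f.Close()
}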
func GenerateTestHistogram(i int) *histogram.Histogram { return &histogram.Histogram{ diff --git a/tsdb/wal.go b/tsdb/wal.go index bc7db35bf1..1509c9cd96 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -16,6 +16,7 @@ package tsdb import ( "bufio" "encoding/binary" + "errors" "fmt" "hash" "hash/crc32" @@ -28,7 +29,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/storage" @@ -210,7 +210,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, for _, fn := range fns[i:] { if err := os.Remove(fn); err != nil { - return w, errors.Wrap(err, "removing segment failed") + return w, fmt.Errorf("removing segment failed: %w", err) } } break @@ -237,8 +237,8 @@ func (r *repairingWALReader) Read( if err == nil { return nil } - cerr, ok := errors.Cause(err).(walCorruptionErr) - if !ok { + var cerr *walCorruptionErr + if !errors.As(err, &cerr) { return err } r.wal.metrics.corruptions.Inc() @@ -309,7 +309,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) // Past WAL files are closed. We have to reopen them for another read. f, err := w.openSegmentFile(sf.Name()) if err != nil { - return errors.Wrap(err, "open old WAL segment for read") + return fmt.Errorf("open old WAL segment for read: %w", err) } candidates = append(candidates, &segmentFile{ File: f, @@ -326,7 +326,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) // Create a new tmp file. f, err := w.createSegmentFile(filepath.Join(w.dirFile.Name(), "compact.tmp")) if err != nil { - return errors.Wrap(err, "create compaction segment") + return fmt.Errorf("create compaction segment: %w", err) } defer func() { if err := os.RemoveAll(f.Name()); err != nil { @@ -352,7 +352,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) err := r.decodeSeries(flag, byt, &decSeries) if err != nil { - return errors.Wrap(err, "decode samples while truncating") + return fmt.Errorf("decode samples while truncating: %w", err) } for _, s := range decSeries { if keep(s.Ref) { @@ -367,11 +367,11 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "write to compaction segment") + return fmt.Errorf("write to compaction segment: %w", err) } } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read candidate WAL files") + if err := r.Err(); err != nil { + return fmt.Errorf("read candidate WAL files: %w", err) } off, err := csf.Seek(0, io.SeekCurrent) @@ -390,12 +390,12 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) _ = candidates[0].Close() // need close before remove on platform windows if err := fileutil.Replace(csf.Name(), candidates[0].Name()); err != nil { - return errors.Wrap(err, "rename compaction segment") + return fmt.Errorf("rename compaction segment: %w", err) } for _, f := range candidates[1:] { f.Close() // need close before remove on platform windows if err := os.RemoveAll(f.Name()); err != nil { - return errors.Wrap(err, "delete WAL segment file") + return fmt.Errorf("delete WAL segment file: %w", err) } } if err := w.dirFile.Sync(); err != nil { @@ -435,7 +435,7 @@ func (w *SegmentWAL) LogSeries(series []record.RefSeries) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -462,7 +462,7 @@ func (w 
*SegmentWAL) LogSamples(samples []record.RefSample) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -488,7 +488,7 @@ func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -523,7 +523,7 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { switch n, err := f.Read(metab); { case err != nil: - return nil, errors.Wrapf(err, "validate meta %q", f.Name()) + return nil, fmt.Errorf("validate meta %q: %w", f.Name(), err) case n != 8: return nil, fmt.Errorf("invalid header size %d in %q", n, f.Name()) } @@ -573,16 +573,16 @@ func (w *SegmentWAL) cut() error { w.actorc <- func() error { off, err := hf.Seek(0, io.SeekCurrent) if err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Truncate(off); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Sync(); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Close(); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } return nil } @@ -600,7 +600,10 @@ func (w *SegmentWAL) cut() error { go func() { w.actorc <- func() error { - return errors.Wrap(w.dirFile.Sync(), "sync WAL directory") + if err := w.dirFile.Sync(); err != nil { + return fmt.Errorf("sync WAL directory: %w", err) + } + return nil } }() @@ -635,7 +638,7 @@ func (w *SegmentWAL) Sync() error { head = w.head() }() if err != nil { - return errors.Wrap(err, "flush buffer") + return fmt.Errorf("flush buffer: %w", err) } if head != nil { // But only fsync the head segment after releasing the mutex as it will block on disk I/O. @@ -726,11 +729,13 @@ func (w *SegmentWAL) Close() error { // only the current segment will still be open. 
if hf := w.head(); hf != nil { if err := hf.Close(); err != nil { - return errors.Wrapf(err, "closing WAL head %s", hf.Name()) + return fmt.Errorf("closing WAL head %s: %w", hf.Name(), err) } } - - return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name()) + if err := w.dirFile.Close(); err != nil { + return fmt.Errorf("closing WAL dir %s: %w", w.dirFile.Name(), err) + } + return nil } func (w *SegmentWAL) write(t WALEntryType, flag uint8, buf []byte) error { @@ -921,7 +926,7 @@ func (r *walReader) Read( err = r.decodeSeries(flag, b, &series) if err != nil { - err = errors.Wrap(err, "decode series entry") + err = fmt.Errorf("decode series entry: %w", err) break } datac <- series @@ -940,7 +945,7 @@ func (r *walReader) Read( err = r.decodeSamples(flag, b, &samples) if err != nil { - err = errors.Wrap(err, "decode samples entry") + err = fmt.Errorf("decode samples entry: %w", err) break } datac <- samples @@ -960,7 +965,7 @@ func (r *walReader) Read( err = r.decodeDeletes(flag, b, &deletes) if err != nil { - err = errors.Wrap(err, "decode delete entry") + err = fmt.Errorf("decode delete entry: %w", err) break } datac <- deletes @@ -982,8 +987,8 @@ func (r *walReader) Read( if err != nil { return err } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read entry") + if err := r.Err(); err != nil { + return fmt.Errorf("read entry: %w", err) } return nil } @@ -1046,12 +1051,16 @@ type walCorruptionErr struct { lastOffset int64 } -func (e walCorruptionErr) Error() string { +func (e *walCorruptionErr) Error() string { return fmt.Sprintf("%s <file: %d, lastOffset: %d>", e.err, e.file, e.lastOffset) } +func (e *walCorruptionErr) Unwrap() error { + return e.err +} + func (r *walReader) corruptionErr(s string, args ...interface{}) error { - return walCorruptionErr{ + return &walCorruptionErr{ err: fmt.Errorf(s, args...), file: r.cur, lastOffset: r.lastOffset, @@ -1152,8 +1161,8 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) }) } - if dec.Err() != nil { - return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res)) + if err := dec.Err(); err != nil { + return fmt.Errorf("decode error after %d samples: %w", len(*res), err) } if len(dec.B) > 0 { return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } @@ -1185,7 +1194,7 @@ func deprecatedWALExists(logger log.Logger, dir string) (bool, error) { // Detect whether we still have the old WAL. fns, err := sequenceFiles(dir) if err != nil && !os.IsNotExist(err) { - return false, errors.Wrap(err, "list sequence files") + return false, fmt.Errorf("list sequence files: %w", err) } if len(fns) == 0 { return false, nil // No WAL at all yet. } @@ -1194,13 +1203,13 @@ func deprecatedWALExists(logger log.Logger, dir string) (bool, error) { // old WAL. f, err := os.Open(fns[0]) if err != nil { - return false, errors.Wrap(err, "check first existing segment") + return false, fmt.Errorf("check first existing segment: %w", err) } defer f.Close() var hdr [4]byte - if _, err := f.Read(hdr[:]); err != nil && err != io.EOF { - return false, errors.Wrap(err, "read header from first segment") + if _, err := f.Read(hdr[:]); err != nil && !errors.Is(err, io.EOF) { + return false, fmt.Errorf("read header from first segment: %w", err) } // If we cannot read the magic header for segments of the old WAL, abort.
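walCorruptionErr above becomes a pointer type with an Unwrap method, which is what allows the repairingWALReader hunk earlier in this file's diff to switch from the removed errors.Cause to errors.As: As walks the wrapped chain looking for a *walCorruptionErr even when intermediate layers added fmt.Errorf("...: %w", ...) context. A self-contained miniature of the same mechanics (corruptionErr is a stand-in type for the sketch):

package main

import (
	"errors"
	"fmt"
)

// corruptionErr mirrors the shape of walCorruptionErr above: a cause plus
// positional detail, with Unwrap exposing the cause to errors.Is/As.
type corruptionErr struct {
	err        error
	file       int
	lastOffset int64
}

func (e *corruptionErr) Error() string {
	return fmt.Sprintf("%s <file: %d, lastOffset: %d>", e.err, e.file, e.lastOffset)
}

func (e *corruptionErr) Unwrap() error { return e.err }

func main() {
	err := fmt.Errorf("read entry: %w", &corruptionErr{
		err: errors.New("bad checksum"), file: 3, lastOffset: 4096,
	})

	var cerr *corruptionErr
	if errors.As(err, &cerr) { // finds the typed error through the wrapping
		fmt.Println("repair from file", cerr.file, "offset", cerr.lastOffset)
	}
}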
// Either it's migrated already or there's a corruption issue with which @@ -1223,11 +1232,11 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { tmpdir := dir + ".tmp" if err := os.RemoveAll(tmpdir); err != nil { - return errors.Wrap(err, "cleanup replacement dir") + return fmt.Errorf("cleanup replacement dir: %w", err) } repl, err := wlog.New(logger, nil, tmpdir, wlog.CompressionNone) if err != nil { - return errors.Wrap(err, "open new WAL") + return fmt.Errorf("open new WAL: %w", err) } // It should've already been closed as part of the previous finalization. @@ -1240,7 +1249,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { w, err := OpenSegmentWAL(dir, logger, time.Minute, nil) if err != nil { - return errors.Wrap(err, "open old WAL") + return fmt.Errorf("open old WAL: %w", err) } defer w.Close() @@ -1271,22 +1280,22 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { }, ) if decErr != nil { - return errors.Wrap(err, "decode old entries") + return fmt.Errorf("decode old entries: %w", err) } if err != nil { - return errors.Wrap(err, "write new entries") + return fmt.Errorf("write new entries: %w", err) } // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := w.Close(); err != nil { - return errors.Wrap(err, "close old WAL") + return fmt.Errorf("close old WAL: %w", err) } if err := repl.Close(); err != nil { - return errors.Wrap(err, "close new WAL") + return fmt.Errorf("close new WAL: %w", err) } if err := fileutil.Replace(tmpdir, dir); err != nil { - return errors.Wrap(err, "replace old WAL") + return fmt.Errorf("replace old WAL: %w", err) } return nil } diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index 5b2911131a..964763d7f1 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -49,7 +49,7 @@ func TestSegmentWAL_cut(t *testing.T) { require.NoError(t, w.cut()) // Cutting creates a new file. - require.Equal(t, 2, len(w.files)) + require.Len(t, w.files, 2) require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!"))) @@ -409,7 +409,7 @@ func TestWALRestoreCorrupted(t *testing.T) { r := w2.Reader() serf := func(l []record.RefSeries) { - require.Equal(t, 0, len(l)) + require.Empty(t, l) } // Weird hack to check order of reads. diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 381e091861..bc5b4f20fa 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -229,7 +229,7 @@ func TestCheckpoint(t *testing.T) { // Only the new checkpoint should be left. files, err := os.ReadDir(dir) require.NoError(t, err) - require.Equal(t, 1, len(files)) + require.Len(t, files, 1) require.Equal(t, "checkpoint.00000106", files[0].Name()) sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106")) diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 9d2ec50d22..0f510e0c1e 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -344,7 +344,7 @@ func TestReaderFuzz(t *testing.T) { r := reader.Record() // Expected value may come as nil or empty slice, so it requires special comparison. if len(expected) == 0 { - require.Len(t, r, 0) + require.Empty(t, r) } else { require.Equal(t, expected, r, "read wrong record") } @@ -395,7 +395,7 @@ func TestReaderFuzz_Live(t *testing.T) { require.True(t, ok, "unexpected record") // Expected value may come as nil or empty slice, so it requires special comparison. 
if len(expected) == 0 { - require.Len(t, rec, 0) + require.Empty(t, rec) } else { require.Equal(t, expected, rec, "record does not match expected") } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 5689602e74..1c76e38877 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -14,6 +14,7 @@ package wlog import ( + "errors" "fmt" "io" "math" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -65,7 +65,7 @@ type WriteTo interface { SeriesReset(int) } -// Used to notifier the watcher that data has been written so that it can read. +// Used to notify the watcher that data has been written so that it can read. type WriteNotified interface { Notify() } @@ -264,7 +264,7 @@ func (w *Watcher) loop() { func (w *Watcher) Run() error { _, lastSegment, err := w.firstAndLast() if err != nil { - return errors.Wrap(err, "wal.Segments") + return fmt.Errorf("wal.Segments: %w", err) } // We want to ensure this is false across iterations since @@ -275,13 +275,13 @@ func (w *Watcher) Run() error { // Backfill from the checkpoint first if it exists. lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "tsdb.LastCheckpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("tsdb.LastCheckpoint: %w", err) } if err == nil { if err = w.readCheckpoint(lastCheckpoint, (*Watcher).readSegment); err != nil { - return errors.Wrap(err, "readCheckpoint") + return fmt.Errorf("readCheckpoint: %w", err) } } w.lastCheckpoint = lastCheckpoint @@ -371,7 +371,7 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. if !tail { - if err != nil && errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) @@ -380,7 +380,7 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s } // Otherwise, when we are tailing, non-EOFs are fatal. - if errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } return nil @@ -398,8 +398,16 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { reader := NewLiveReader(w.logger, w.readerMetrics, segment) - readTicker := time.NewTicker(readTimeout) - defer readTicker.Stop() + size := int64(math.MaxInt64) + if !tail { + var err error + size, err = getSegmentSize(w.walDir, segmentNum) + if err != nil { + return fmt.Errorf("getSegmentSize: %w", err) + } + + return w.readAndHandleError(reader, segmentNum, tail, size) + } checkpointTicker := time.NewTicker(checkpointPeriod) defer checkpointTicker.Stop() @@ -407,18 +415,8 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { segmentTicker := time.NewTicker(segmentCheckPeriod) defer segmentTicker.Stop() - // If we're replaying the segment we need to know the size of the file to know - // when to return from watch and move on to the next segment. 
- size := int64(math.MaxInt64) - if !tail { - segmentTicker.Stop() - checkpointTicker.Stop() - var err error - size, err = getSegmentSize(w.walDir, segmentNum) - if err != nil { - return errors.Wrap(err, "getSegmentSize") - } - } + readTicker := time.NewTicker(readTimeout) + defer readTicker.Stop() gcSem := make(chan struct{}, 1) for { @@ -449,7 +447,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { case <-segmentTicker.C: _, last, err := w.firstAndLast() if err != nil { - return errors.Wrap(err, "segments") + return fmt.Errorf("segments: %w", err) } // Check if new segments exist. @@ -461,7 +459,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore errors reading to end of segment whilst replaying the WAL. if !tail { switch { - case err != nil && errors.Cause(err) != io.EOF: + case err != nil && !errors.Is(err, io.EOF): level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) @@ -470,7 +468,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { } // Otherwise, when we are tailing, non-EOFs are fatal. - if errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } @@ -499,8 +497,8 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { func (w *Watcher) garbageCollectSeries(segmentNum int) error { dir, _, err := LastCheckpoint(w.walDir) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "tsdb.LastCheckpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("tsdb.LastCheckpoint: %w", err) } if dir == "" || dir == w.lastCheckpoint { @@ -510,7 +508,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { index, err := checkpointNum(dir) if err != nil { - return errors.Wrap(err, "error parsing checkpoint filename") + return fmt.Errorf("error parsing checkpoint filename: %w", err) } if index >= segmentNum { @@ -521,7 +519,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { - return errors.Wrap(err, "readCheckpoint") + return fmt.Errorf("readCheckpoint: %w", err) } // Clear series with a checkpoint or segment index # lower than the checkpoint we just read. @@ -660,7 +658,10 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { w.recordDecodeFailsMetric.Inc() } } - return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) + if err := r.Err(); err != nil { + return fmt.Errorf("segment %d: %w", segmentNum, err) + } + return nil } // Go through all series in a segment updating the segmentNum, so we can delete older series.
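The watcher and old-WAL hunks above all follow one mechanical recipe for dropping github.com/pkg/errors: errors.Wrap and errors.Wrapf become fmt.Errorf with the %w verb, errors.Cause comparisons become errors.Is (plus an explicit err != nil guard), and error types such as walCorruptionErr and CorruptionErr gain an Unwrap method so the standard library can traverse them. A minimal, self-contained sketch of the resulting pattern; the corruptionErr type here is illustrative, not the real walCorruptionErr:

package main

import (
	"errors"
	"fmt"
	"io"
)

type corruptionErr struct {
	err        error
	lastOffset int64
}

func (e *corruptionErr) Error() string {
	return fmt.Sprintf("%s <lastOffset: %d>", e.err, e.lastOffset)
}

// Unwrap exposes the wrapped error to errors.Is and errors.As, which is
// why the patch adds the same method to walCorruptionErr and CorruptionErr.
func (e *corruptionErr) Unwrap() error { return e.err }

func main() {
	err := fmt.Errorf("read entry: %w", &corruptionErr{err: io.EOF, lastOffset: 42})

	// errors.Is walks the whole Unwrap chain, so the sentinel is found even
	// under two layers of wrapping. pkg/errors' Cause only unwraps its own
	// wrapper types and misses fmt.Errorf wrappers entirely, which is why
	// errors.Cause(err) != io.EOF checks become !errors.Is(err, io.EOF).
	fmt.Println(errors.Is(err, io.EOF)) // true

	var cerr *corruptionErr
	if errors.As(err, &cerr) {
		fmt.Println(cerr.lastOffset) // 42
	}
}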
@@ -693,7 +694,10 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error w.recordDecodeFailsMetric.Inc() } } - return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) + if err := r.Err(); err != nil { + return fmt.Errorf("segment %d: %w", segmentNum, err) + } + return nil } func (w *Watcher) SetStartTime(t time.Time) { @@ -708,29 +712,29 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { - return errors.Wrap(err, "checkpointNum") + return fmt.Errorf("checkpointNum: %w", err) } // Ensure we read the whole contents of every segment in the checkpoint dir. segs, err := w.segments(checkpointDir) if err != nil { - return errors.Wrap(err, "Unable to get segments checkpoint dir") + return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) } for _, seg := range segs { size, err := getSegmentSize(checkpointDir, seg) if err != nil { - return errors.Wrap(err, "getSegmentSize") + return fmt.Errorf("getSegmentSize: %w", err) } sr, err := OpenReadSegment(SegmentName(checkpointDir, seg)) if err != nil { - return errors.Wrap(err, "unable to open segment") + return fmt.Errorf("unable to open segment: %w", err) } defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := readFn(w, r, index, false); errors.Cause(err) != io.EOF && err != nil { - return errors.Wrap(err, "readSegment") + if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("readSegment: %w", err) } if r.Offset() != size { diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index bc6a10126e..b30dce91a3 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -199,7 +199,7 @@ func TestTailSamples(t *testing.T) { floatHistogram := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, - FH: hist.ToFloat(), + FH: hist.ToFloat(nil), }}, nil) require.NoError(t, w.Log(floatHistogram)) } @@ -630,3 +630,61 @@ func TestCheckpointSeriesReset(t *testing.T) { }) } } + +func TestRun_StartupTime(t *testing.T) { + const pageSize = 32 * 1024 + const segments = 10 + const seriesCount = 20 + const samplesCount = 300 + + for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} { + t.Run(string(compress), func(t *testing.T) { + dir := t.TempDir() + + wdir := path.Join(dir, "wal") + err := os.Mkdir(wdir, 0o777) + require.NoError(t, err) + + enc := record.Encoder{} + w, err := NewSize(nil, nil, wdir, pageSize, compress) + require.NoError(t, err) + + for i := 0; i < segments; i++ { + for j := 0; j < seriesCount; j++ { + ref := j + (i * 100) + series := enc.Series([]record.RefSeries{ + { + Ref: chunks.HeadSeriesRef(ref), + Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)), + }, + }, nil) + require.NoError(t, w.Log(series)) + + for k := 0; k < samplesCount; k++ { + inner := rand.Intn(ref + 1) + sample := enc.Samples([]record.RefSample{ + { + Ref: chunks.HeadSeriesRef(inner), + T: int64(i), + V: float64(i), + }, + }, nil) + require.NoError(t, w.Log(sample)) + } + } + } + require.NoError(t, w.Close()) + + wt := newWriteToMock() + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false) + watcher.MaxSegment = segments + + watcher.setMetrics() + startTime := time.Now() + + err = watcher.Run() + require.Less(t, time.Since(startTime), 
readTimeout) + require.NoError(t, err) + }) + } +} diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index c4305bcbf1..fdea756945 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -116,6 +116,10 @@ func (e *CorruptionErr) Error() string { return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err) } +func (e *CorruptionErr) Unwrap() error { + return e.Err +} + // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) @@ -970,7 +974,7 @@ type segmentBufReader struct { off int // Offset of read data into current segment. } -func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { +func NewSegmentBufReader(segs ...*Segment) io.ReadCloser { if len(segs) == 0 { return &segmentBufReader{} } @@ -981,15 +985,16 @@ func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { } } -func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) { +func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (io.ReadCloser, error) { if offset == 0 || len(segs) == 0 { return NewSegmentBufReader(segs...), nil } - sbr = &segmentBufReader{ + sbr := &segmentBufReader{ buf: bufio.NewReaderSize(segs[0], 16*pageSize), segs: segs, } + var err error if offset > 0 { _, err = sbr.buf.Discard(offset) } diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index ba2a506981..3aeebd099e 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -188,7 +188,7 @@ func TestWALRepair_ReadingError(t *testing.T) { result = append(result, append(b, r.Record()...)) } require.NoError(t, r.Err()) - require.Equal(t, test.intactRecs, len(result), "Wrong number of intact records") + require.Len(t, result, test.intactRecs, "Wrong number of intact records") for i, r := range result { if !bytes.Equal(records[i], r) { @@ -282,7 +282,7 @@ func TestCorruptAndCarryOn(t *testing.T) { reader := NewReader(sr) i := 0 for ; i < 4 && reader.Next(); i++ { - require.Equal(t, recordSize, len(reader.Record())) + require.Len(t, reader.Record(), recordSize) } require.Equal(t, 4, i, "not enough records") require.False(t, reader.Next(), "unexpected record") @@ -300,8 +300,8 @@ func TestCorruptAndCarryOn(t *testing.T) { require.NoError(t, err) // Ensure that we have a completely clean slate after repairing. - require.Equal(t, w.segment.Index(), 1) // We corrupted segment 0. - require.Equal(t, w.donePages, 0) + require.Equal(t, 1, w.segment.Index()) // We corrupted segment 0. 
+ require.Equal(t, 0, w.donePages) for i := 0; i < 5; i++ { buf := make([]byte, recordSize) @@ -324,11 +324,11 @@ func TestCorruptAndCarryOn(t *testing.T) { reader := NewReader(sr) i := 0 for ; i < 9 && reader.Next(); i++ { - require.Equal(t, recordSize, len(reader.Record())) + require.Len(t, reader.Record(), recordSize) } require.Equal(t, 9, i, "wrong number of records") require.False(t, reader.Next(), "unexpected record") - require.Equal(t, nil, reader.Err()) + require.NoError(t, reader.Err()) sr.Close() } } @@ -455,7 +455,7 @@ func TestLogPartialWrite(t *testing.T) { for i := 1; i <= testData.numRecords; i++ { if err := w.Log(record); i == testData.faultyRecord { - require.Error(t, io.ErrShortWrite, err) + require.ErrorIs(t, err, io.ErrShortWrite) } else { require.NoError(t, err) } diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 2041d02178..180d408430 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -94,6 +94,7 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string { return arr } +//nolint:revive // error-naming. var ( // Currently there are only 2 types, warnings and info. // For now, info are visually identical with warnings as we have not updated @@ -130,7 +131,7 @@ func (e annoErr) Unwrap() error { // NewInvalidQuantileWarning is used when the user specifies an invalid quantile // value, i.e. a float that is outside the range [0, 1] or NaN. -func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr { +func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w, got %g", InvalidQuantileWarning, q), @@ -139,7 +140,7 @@ // NewBadBucketLabelWarning is used when there is an error parsing the bucket label // of a classic histogram. -func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) annoErr { +func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w of %q for metric name %q", BadBucketLabelWarning, label, metricName), @@ -149,7 +150,7 @@ // NewMixedFloatsHistogramsWarning is used when the queried series includes both // float samples and histogram samples for functions that do not support mixed // samples. -func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr { +func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", MixedFloatsHistogramsWarning, metricName), @@ -158,7 +159,7 @@ // NewMixedClassicNativeHistogramsWarning is used when the queried series includes // both classic and native histograms.
-func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr { +func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", MixedClassicNativeHistogramsWarning, metricName), @@ -167,7 +168,7 @@ func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.Posi // NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not // have the suffixes _total, _sum, _count, or _bucket. -func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) annoErr { +func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName), @@ -176,7 +177,7 @@ func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) an // NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to // histogram_quantile needs to be forced to be monotonic. -func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) annoErr { +func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName), diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index 0f052f5e75..c592630fe8 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -204,7 +204,7 @@ func TestParseAndPushMetricsTextAndFormat(t *testing.T) { expected, err := MetricTextToWriteRequest(input, labels) require.NoError(t, err) - require.Equal(t, writeRequestFixture, expected) + require.Equal(t, expected, writeRequestFixture) } func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) { @@ -217,7 +217,7 @@ func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) { labels := map[string]string{"job": "promtool"} _, err := MetricTextToWriteRequest(input, labels) - require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"") + require.Equal(t, "text format parsing error in line 4: expected float as value, got \"1027Error\"", err.Error()) } func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) { @@ -229,5 +229,5 @@ func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) { labels := map[string]string{"job": "promtool"} _, err := MetricTextToWriteRequest(input, labels) - require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"") + require.Equal(t, "text format parsing error in line 3: unknown metric type \"info\"", err.Error()) } diff --git a/util/runutil/runutil.go b/util/runutil/runutil.go new file mode 100644 index 0000000000..5a77c332ba --- /dev/null +++ b/util/runutil/runutil.go @@ -0,0 +1,37 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Copied from https://github.com/efficientgo/core/blob/a21078e2c723b69e05f95c65dbc5058712b4edd8/runutil/runutil.go#L39 +// and adjusted. + +package runutil + +import "time" + +// Retry executes f every interval until stopc is closed or f returns no error. +func Retry(interval time.Duration, stopc <-chan struct{}, f func() error) error { + tick := time.NewTicker(interval) + defer tick.Stop() + + var err error + for { + if err = f(); err == nil { + return nil + } + select { + case <-stopc: + return err + case <-tick.C: + } + } +} diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go index 638a035885..fea8200226 100644 --- a/util/zeropool/pool_test.go +++ b/util/zeropool/pool_test.go @@ -28,19 +28,19 @@ func TestPool(t *testing.T) { t.Run("provides correct values", func(t *testing.T) { pool := zeropool.New(func() []byte { return make([]byte, 1024) }) item1 := pool.Get() - require.Equal(t, 1024, len(item1)) + require.Len(t, item1, 1024) item2 := pool.Get() - require.Equal(t, 1024, len(item2)) + require.Len(t, item2, 1024) pool.Put(item1) pool.Put(item2) item1 = pool.Get() - require.Equal(t, 1024, len(item1)) + require.Len(t, item1, 1024) item2 = pool.Get() - require.Equal(t, 1024, len(item2)) + require.Len(t, item2, 1024) }) t.Run("is not racy", func(t *testing.T) { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 671df78872..dd35d1fe99 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -41,7 +41,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -1114,7 +1114,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } // If no metric is specified, get the full list for the target. if metric == "" { - for _, md := range t.MetadataList() { + for _, md := range t.ListMetadata() { res = append(res, metricMetadata{ Target: t.Labels(), Metric: md.Metric, @@ -1126,7 +1126,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { continue } // Get metadata for the specified metric. - if md, ok := t.Metadata(metric); ok { + if md, ok := t.GetMetadata(metric); ok { res = append(res, metricMetadata{ Target: t.Labels(), Type: md.Type, @@ -1141,11 +1141,11 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } type metricMetadata struct { - Target labels.Labels `json:"target"` - Metric string `json:"metric,omitempty"` - Type textparse.MetricType `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` + Target labels.Labels `json:"target"` + Metric string `json:"metric,omitempty"` + Type model.MetricType `json:"type"` + Help string `json:"help"` + Unit string `json:"unit"` } // AlertmanagerDiscovery has all the active Alertmanagers.
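The new util/runutil package above deliberately takes a plain stop channel instead of a context. A hypothetical usage sketch, assuming nothing beyond the Retry signature shown in the new file; a caller such as a test closes stopc to bound the wait:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/prometheus/util/runutil"
)

func main() {
	stopc := make(chan struct{})
	// Once stopc is closed, Retry gives up and returns the last error from f.
	time.AfterFunc(time.Second, func() { close(stopc) })

	attempts := 0
	err := runutil.Retry(50*time.Millisecond, stopc, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil // Retry returns immediately once f succeeds.
	})
	fmt.Println(attempts, err) // 3 <nil>
}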
@@ -1221,14 +1221,8 @@ func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert { return apiAlerts } -type metadata struct { - Type textparse.MetricType `json:"type"` - Help string `json:"help"` - Unit string `json:"unit"` -} - func (api *API) metricMetadata(r *http.Request) apiFuncResult { - metrics := map[string]map[metadata]struct{}{} + metrics := map[string]map[metadata.Metadata]struct{}{} limit := -1 if s := r.FormValue("limit"); s != "" { @@ -1249,8 +1243,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { if metric == "" { - for _, mm := range t.MetadataList() { - m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} + for _, mm := range t.ListMetadata() { + m := metadata.Metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit} ms, ok := metrics[mm.Metric] if limitPerMetric > 0 && len(ms) >= limitPerMetric { @@ -1258,7 +1252,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } if !ok { - ms = map[metadata]struct{}{} + ms = map[metadata.Metadata]struct{}{} metrics[mm.Metric] = ms } ms[m] = struct{}{} @@ -1266,8 +1260,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { continue } - if md, ok := t.Metadata(metric); ok { - m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} + if md, ok := t.GetMetadata(metric); ok { + m := metadata.Metadata{Type: md.Type, Help: md.Help, Unit: md.Unit} ms, ok := metrics[md.Metric] if limitPerMetric > 0 && len(ms) >= limitPerMetric { @@ -1275,7 +1269,7 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } if !ok { - ms = map[metadata]struct{}{} + ms = map[metadata.Metadata]struct{}{} metrics[md.Metric] = ms } ms[m] = struct{}{} @@ -1284,13 +1278,13 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } // Put the elements from the pseudo-set into a slice for marshaling. 
- res := map[string][]metadata{} + res := map[string][]metadata.Metadata{} for name, set := range metrics { if limit >= 0 && len(res) >= limit { break } - s := []metadata{} + s := []metadata.Metadata{} for metadata := range set { s = append(s, metadata) } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index a5dd8640b3..c9ab84087e 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -44,7 +44,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -872,7 +872,7 @@ func TestStats(t *testing.T) { name: "stats is blank", param: "", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.Nil(t, qd.Stats) }, @@ -881,7 +881,7 @@ func TestStats(t *testing.T) { name: "stats is true", param: "true", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() @@ -896,7 +896,7 @@ func TestStats(t *testing.T) { name: "stats is all", param: "all", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() @@ -917,12 +917,12 @@ func TestStats(t *testing.T) { }, param: "known", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.NotNil(t, qd.Stats) j, err := json.Marshal(qd.Stats) require.NoError(t, err) - require.JSONEq(t, string(j), `{"custom":"Custom Value"}`) + require.JSONEq(t, `{"custom":"Custom Value"}`, string(j)) }, }, } { @@ -1584,7 +1584,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created.", Unit: "", }, @@ -1597,7 +1597,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "job": "test", }), Help: "Number of OS threads created.", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Unit: "", }, }, @@ -1614,7 +1614,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "prometheus_tsdb_storage_blocks_bytes", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "The number of bytes that are currently used for local storage by all blocks.", Unit: "", }, @@ -1628,7 +1628,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }), Metric: "prometheus_tsdb_storage_blocks_bytes", Help: "The number of bytes that are currently used for local storage by all blocks.", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Unit: "", }, }, @@ -1642,7 +1642,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created.", Unit: "", }, @@ 
-1653,7 +1653,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "prometheus_tsdb_storage_blocks_bytes", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "The number of bytes that are currently used for local storage by all blocks.", Unit: "", }, @@ -1667,7 +1667,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }), Metric: "go_threads", Help: "Number of OS threads created.", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Unit: "", }, { @@ -1676,7 +1676,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E }), Metric: "prometheus_tsdb_storage_blocks_bytes", Help: "The number of bytes that are currently used for local storage by all blocks.", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Unit: "", }, }, @@ -1719,22 +1719,22 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "prometheus_engine_query_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "Query timings", Unit: "", }, { Metric: "go_info", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Information about the Go environment.", Unit: "", }, }, }, }, - response: map[string][]metadata{ - "prometheus_engine_query_duration_seconds": {{textparse.MetricTypeSummary, "Query timings", ""}}, - "go_info": {{textparse.MetricTypeGauge, "Information about the Go environment.", ""}}, + response: map[string][]metadata.Metadata{ + "prometheus_engine_query_duration_seconds": {{Type: model.MetricTypeSummary, Help: "Query timings", Unit: ""}}, + "go_info": {{Type: model.MetricTypeGauge, Help: "Information about the Go environment.", Unit: ""}}, }, }, // With duplicate metadata for a metric that comes from different targets. @@ -1746,7 +1746,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, @@ -1757,15 +1757,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, }, }, }, - response: map[string][]metadata{ - "go_threads": {{textparse.MetricTypeGauge, "Number of OS threads created", ""}}, + response: map[string][]metadata.Metadata{ + "go_threads": {{Type: model.MetricTypeGauge, Help: "Number of OS threads created"}}, }, }, // With non-duplicate metadata for the same metric from different targets. 
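The metricMetadata hunks above deduplicate by using the metadata struct itself as the key of a pseudo-set: metadata.Metadata is a comparable struct, so identical metadata reported by several targets collapses into a single map entry, which these test cases exercise. A sketch of that pattern with a local stand-in type (the real key is model/metadata.Metadata):

package main

import "fmt"

// meta is a stand-in with the same comparable shape as metadata.Metadata
// (Type, Help, Unit); comparability is what lets a struct act as a map key.
type meta struct {
	Type, Help, Unit string
}

func main() {
	seen := map[meta]struct{}{}
	for _, m := range []meta{
		{"gauge", "Number of OS threads created", ""},
		{"gauge", "Number of OS threads created", ""}, // exact duplicate from a second target
		{"gauge", "Number of OS threads that were created.", ""},
	} {
		seen[m] = struct{}{}
	}

	// As in the handler, flatten the pseudo-set into a slice for marshaling;
	// only distinct entries survive.
	out := make([]meta, 0, len(seen))
	for m := range seen {
		out = append(out, m)
	}
	fmt.Println(len(out)) // 2
}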
@@ -1777,7 +1777,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, @@ -1788,21 +1788,21 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads that were created.", Unit: "", }, }, }, }, - response: map[string][]metadata{ + response: map[string][]metadata.Metadata{ "go_threads": { - {textparse.MetricTypeGauge, "Number of OS threads created", ""}, - {textparse.MetricTypeGauge, "Number of OS threads that were created.", ""}, + {Type: model.MetricTypeGauge, Help: "Number of OS threads created"}, + {Type: model.MetricTypeGauge, Help: "Number of OS threads that were created."}, }, }, sorter: func(m interface{}) { - v := m.(map[string][]metadata)["go_threads"] + v := m.(map[string][]metadata.Metadata)["go_threads"] sort.Slice(v, func(i, j int) bool { return v[i].Help < v[j].Help @@ -1821,13 +1821,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, { Metric: "prometheus_engine_query_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "Query Timmings.", Unit: "", }, @@ -1838,7 +1838,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_gc_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations.", Unit: "", }, @@ -1857,31 +1857,31 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Repeated metadata", Unit: "", }, { Metric: "go_gc_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations.", Unit: "", }, }, }, }, - response: map[string][]metadata{ + response: map[string][]metadata.Metadata{ "go_threads": { - {textparse.MetricTypeGauge, "Number of OS threads created", ""}, + {Type: model.MetricTypeGauge, Help: "Number of OS threads created"}, }, "go_gc_duration_seconds": { - {textparse.MetricTypeSummary, "A summary of the GC invocation durations.", ""}, + {Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations."}, }, }, }, @@ -1895,19 +1895,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Repeated metadata", Unit: "", }, { Metric: "go_gc_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations.", Unit: "", }, @@ -1928,19 
+1928,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Repeated metadata", Unit: "", }, { Metric: "go_gc_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations.", Unit: "", }, @@ -1951,13 +1951,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created, but from a different target", Unit: "", }, { Metric: "go_gc_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations, but from a different target.", Unit: "", }, @@ -1977,7 +1977,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, @@ -1988,27 +1988,27 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_gc_duration_seconds", - Type: textparse.MetricTypeSummary, + Type: model.MetricTypeSummary, Help: "A summary of the GC invocation durations.", Unit: "", }, { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads that were created.", Unit: "", }, }, }, }, - response: map[string][]metadata{ + response: map[string][]metadata.Metadata{ "go_threads": { - {textparse.MetricTypeGauge, "Number of OS threads created", ""}, - {textparse.MetricTypeGauge, "Number of OS threads that were created.", ""}, + {Type: model.MetricTypeGauge, Help: "Number of OS threads created"}, + {Type: model.MetricTypeGauge, Help: "Number of OS threads that were created."}, }, }, sorter: func(m interface{}) { - v := m.(map[string][]metadata)["go_threads"] + v := m.(map[string][]metadata.Metadata)["go_threads"] sort.Slice(v, func(i, j int) bool { return v[i].Help < v[j].Help @@ -2025,19 +2025,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E metadata: []scrape.MetricMetadata{ { Metric: "go_threads", - Type: textparse.MetricTypeGauge, + Type: model.MetricTypeGauge, Help: "Number of OS threads created", Unit: "", }, }, }, }, - response: map[string][]metadata{}, + response: map[string][]metadata.Metadata{}, }, // With no available metadata. 
{ endpoint: api.metricMetadata, - response: map[string][]metadata{}, + response: map[string][]metadata.Metadata{}, }, { endpoint: api.serveConfig, @@ -2931,7 +2931,7 @@ func assertAPIResponseMetadataLen(t *testing.T, got interface{}, expLen int) { t.Helper() var gotLen int - response := got.(map[string][]metadata) + response := got.(map[string][]metadata.Metadata) for _, m := range response { gotLen += len(m) } diff --git a/web/federate.go b/web/federate.go index 2b79d00532..2e7bac21d9 100644 --- a/web/federate.go +++ b/web/federate.go @@ -138,7 +138,7 @@ Loop: case chunkenc.ValFloat: f = sample.F() case chunkenc.ValHistogram: - fh = sample.H().ToFloat() + fh = sample.H().ToFloat(nil) case chunkenc.ValFloatHistogram: fh = sample.FH() default: diff --git a/web/federate_test.go b/web/federate_test.go index 80539861d9..94783a7399 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -354,7 +354,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { _, err = app.AppendHistogram(0, l, 100*60*1000, histWithoutZeroBucket.Copy(), nil) expVec = append(expVec, promql.Sample{ T: 100 * 60 * 1000, - H: histWithoutZeroBucket.ToFloat(), + H: histWithoutZeroBucket.ToFloat(nil), Metric: expL, }) default: @@ -363,7 +363,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { _, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil) expVec = append(expVec, promql.Sample{ T: 100 * 60 * 1000, - H: hist.ToFloat(), + H: hist.ToFloat(nil), Metric: expL, }) } diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 408891028c..20777a38a3 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,20 +29,20 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.48.0-rc.2", + "@prometheus-io/lezer-promql": "0.48.0", "lru-cache": "^7.18.3" }, "devDependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.1" + "nock": "^13.4.0" }, "peerDependencies": { "@codemirror/autocomplete": "^6.4.0", diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts index 5df81fe103..963fc95f2f 100644 --- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts +++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts @@ -317,6 +317,12 @@ export const functionIdentifierTerms = [ info: 'Calculate base-2 logarithm of input series', type: 'function', }, + { + label: 'mad_over_time', + detail: 'function', + info: 'Return the median absolute deviation over time for input series', + type: 'function', + }, { label: 'max_over_time', detail: 'function', @@ 
-427,6 +433,18 @@ functionIdentifierTerms = [ info: 'Sort input series descendingly by value', type: 'function', }, + { + label: 'sort_by_label', + detail: 'function', + info: 'Sort input series ascendingly by label value', + type: 'function', + }, + { + label: 'sort_by_label_desc', + detail: 'function', + info: 'Sort input series descendingly by label value', + type: 'function', + }, { label: 'sqrt', detail: 'function', diff --git a/web/ui/module/codemirror-promql/src/parser/parser.test.ts b/web/ui/module/codemirror-promql/src/parser/parser.test.ts index 5ef9c1f907..78195a5c64 100644 --- a/web/ui/module/codemirror-promql/src/parser/parser.test.ts +++ b/web/ui/module/codemirror-promql/src/parser/parser.test.ts @@ -95,6 +95,11 @@ describe('promql operations', () => { expectedValueType: ValueType.vector, expectedDiag: [] as Diagnostic[], }, + { + expr: 'mad_over_time(rate(metric_name[5m])[1h:] offset 1m)', + expectedValueType: ValueType.vector, + expectedDiag: [] as Diagnostic[], + }, { expr: 'max_over_time(rate(metric_name[5m])[1h:] offset 1m)', expectedValueType: ValueType.vector, diff --git a/web/ui/module/codemirror-promql/src/types/function.ts b/web/ui/module/codemirror-promql/src/types/function.ts index 649cbad337..3694781586 100644 --- a/web/ui/module/codemirror-promql/src/types/function.ts +++ b/web/ui/module/codemirror-promql/src/types/function.ts @@ -56,6 +56,7 @@ import { Ln, Log10, Log2, + MadOverTime, MaxOverTime, MinOverTime, Minute, @@ -74,6 +75,8 @@ import { Sinh, Sort, SortDesc, + SortByLabel, + SortByLabelDesc, Sqrt, StddevOverTime, StdvarOverTime, @@ -368,6 +371,12 @@ const promqlFunctions: { [key: number]: PromQLFunction } = { variadic: 0, returnType: ValueType.vector, }, + [MadOverTime]: { + name: 'mad_over_time', + argTypes: [ValueType.matrix], + variadic: 0, + returnType: ValueType.vector, + }, [MaxOverTime]: { name: 'max_over_time', argTypes: [ValueType.matrix], @@ -476,6 +485,18 @@ const promqlFunctions: { [key: number]: PromQLFunction } = { variadic: 0, returnType: ValueType.vector, }, + [SortByLabel]: { + name: 'sort_by_label', + argTypes: [ValueType.vector, ValueType.string], + variadic: -1, + returnType: ValueType.vector, + }, + [SortByLabelDesc]: { + name: 'sort_by_label_desc', + argTypes: [ValueType.vector, ValueType.string], + variadic: -1, + returnType: ValueType.vector, + }, [Sqrt]: { name: 'sqrt', argTypes: [ValueType.vector], diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 9f4a19d007..0792682b1f 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", @@ -30,9 +30,9 @@ "test": "NODE_OPTIONS=--experimental-vm-modules jest" }, "devDependencies": { - "@lezer/generator": "^1.2.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6" + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", diff --git a/web/ui/module/lezer-promql/src/highlight.js b/web/ui/module/lezer-promql/src/highlight.js index 809f710beb..1bf342d28f 100644 --- a/web/ui/module/lezer-promql/src/highlight.js +++ b/web/ui/module/lezer-promql/src/highlight.js @@ -20,7 +20,7 @@ export const promQLHighLight = styleTags({ NumberLiteral: tags.number, Duration: tags.number, Identifier: tags.variableName, - 'Abs 
Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramCount HistogramFraction HistogramQuantile HistogramSum HoltWinters Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year': + 'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramCount HistogramFraction HistogramQuantile HistogramSum HoltWinters Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year': tags.function(tags.variableName), 'Avg Bottomk Count Count_values Group Max Min Quantile Stddev Stdvar Sum Topk': tags.operatorKeyword, 'By Without Bool On Ignoring GroupLeft GroupRight Offset Start End': tags.modifier, diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar index 37f9a39cd8..ab627c8296 100644 --- a/web/ui/module/lezer-promql/src/promql.grammar +++ b/web/ui/module/lezer-promql/src/promql.grammar @@ -149,6 +149,7 @@ FunctionIdentifier { Ln | Log10 | Log2 | + MadOverTime | MaxOverTime | MinOverTime | Minute | @@ -167,6 +168,8 @@ FunctionIdentifier { Sinh | Sort | SortDesc | + SortByLabel | + SortByLabelDesc | Sqrt | StddevOverTime | StdvarOverTime | @@ -378,6 +381,7 @@ NumberLiteral { Ln { condFn<"ln"> } Log10 { condFn<"log10"> } Log2 { condFn<"log2"> } + MadOverTime { condFn<"mad_over_time"> } MaxOverTime { condFn<"max_over_time"> } MinOverTime { condFn<"min_over_time"> } Minute { condFn<"minute"> } @@ -396,6 +400,8 @@ NumberLiteral { Sinh { condFn<"sinh"> } Sort { condFn<"sort"> } SortDesc { condFn<"sort_desc"> } + SortByLabel { condFn<"sort_by_label"> } + SortByLabelDesc { condFn<"sort_by_label_desc"> } Sqrt { condFn<"sqrt"> } StddevOverTime { condFn<"stddev_over_time"> } StdvarOverTime { condFn<"stdvar_over_time"> } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index cfa5dec00a..51dd607499 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,27 +1,27 @@ { "name": "prometheus-io", - "version": "0.48.0-rc.2", + "version": "0.48.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.48.0-rc.2", + "version": "0.48.0", "workspaces": [ "react-app", "module/*" ], "devDependencies": { - "@types/jest": "^29.5.2", - "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.8.0", + "@types/jest": "^29.5.11", + "@types/node": "^20.10.4", + "eslint-config-prettier": "^8.10.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.5.1", + "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.1.0", + "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, "engines": { @@ -30,23 +30,23 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - 
"version": "0.48.0-rc.2", + "version": "0.48.0", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.48.0-rc.2", + "@prometheus-io/lezer-promql": "0.48.0", "lru-cache": "^7.18.3" }, "devDependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.1" + "nock": "^13.4.0" }, "engines": { "node": ">=12.0.0" @@ -70,12 +70,12 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.48.0-rc.2", + "version": "0.48.0", "license": "Apache-2.0", "devDependencies": { - "@lezer/generator": "^1.2.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6" + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -96,17 +96,89 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/compat-data": { "version": "7.19.3", "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.19.3.tgz", @@ -174,13 +246,14 @@ } }, "node_modules/@babel/generator": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz", - "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.3.tgz", + "integrity": "sha512-keeZWAV4LU3tW0qRi19HRpabC/ilM0HRBBzf9/k8FFiG4KVpiv0FIy4hHfLfFQZNhziCTPTmd59zoyv6DNISzg==", "dev": true, "dependencies": { - "@babel/types": "^7.19.3", + "@babel/types": "^7.23.3", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "engines": { @@ -299,9 +372,9 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true, "engines": { "node": ">=6.9.0" @@ -320,25 +393,25 @@ } }, "node_modules/@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "dependencies": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -467,30 +540,30 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "dev": true, "engines": { "node": ">=6.9.0" @@ -535,13 +608,13 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -620,9 +693,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.3.tgz", - "integrity": "sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.3.tgz", + "integrity": "sha512-uVsWNvlVsIninV2prNz/3lHCb+5CJ+e+IUBfbjToAHODtfGYLfCFuY4AU7TskI+dAKk+njsPiBjq1gKTvZOBaw==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -2025,33 +2098,33 @@ } 
}, "node_modules/@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.3.tgz", - "integrity": "sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.3.tgz", + "integrity": "sha512-+K0yF1/9yR0oHdE0StHuEj3uTPzwwbrLGfNOndVJVV2TqA5+j3oljJUb4nmB954FLGjNem976+B+eDuLIjesiQ==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.3", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.3", - "@babel/types": "^7.19.3", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.3", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.3", + "@babel/types": "^7.23.3", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -2069,13 +2142,13 @@ } }, "node_modules/@babel/types": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.3.tgz", - "integrity": "sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.3.tgz", + "integrity": "sha512-OZnvoH2l8PK5eUvEcUyCt/sXgr/h+UWpVuBbOljwcrAgUl6lpchoQ++PHGyQy1AtYnVA6CEq3y5xeEI10brpXw==", "dev": true, "dependencies": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": { @@ -2089,13 +2162,13 @@ "dev": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", - "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", + "version": "6.11.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.11.1.tgz", + "integrity": "sha512-L5UInv8Ffd6BPw0P3EF7JLYAMeEbclY7+6Q11REt8vhih8RuLreKtPy/xk8wPxs4EQgYqzI7cdgpiYwWlbS/ow==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.6.0", + "@codemirror/view": "^6.17.0", "@lezer/common": "^1.0.0" }, "peerDependencies": { @@ -2106,33 +2179,33 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.2.4", - "resolved": 
"https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", - "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", + "version": "6.3.2", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.2.tgz", + "integrity": "sha512-tjoi4MCWDNxgIpoLZ7+tezdS9OEB6pkiDKhfKx9ReJ/XBcs2G2RXIu+/FxXBlWsPTsz6C9q/r4gjzrsxpcnqCQ==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0" + "@lezer/common": "^1.1.0" } }, "node_modules/@codemirror/language": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", - "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", + "version": "6.9.3", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.9.3.tgz", + "integrity": "sha512-qq48pYzoi6ldYWV/52+Z9Ou6QouVI+8YwvxFbUypI33NbjG2UeRHKENRyhwljTTiOqjQ33FjyZj6EREQ9apAOQ==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0", + "@lezer/common": "^1.1.0", "@lezer/highlight": "^1.0.0", "@lezer/lr": "^1.0.0", "style-mod": "^4.0.0" } }, "node_modules/@codemirror/lint": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", - "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.4.2.tgz", + "integrity": "sha512-wzRkluWb1ptPKdzlsrbwwjYCPLgzU6N88YBAmlZi8WFyuiEduSd05MnJYNogzyc8rPK7pj6m95ptUApc8sHKVA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2140,9 +2213,9 @@ } }, "node_modules/@codemirror/search": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", - "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.5.tgz", + "integrity": "sha512-PIEN3Ke1buPod2EHbJsoQwlbpkz30qGZKcnmH1eihq9+bPQx8gelauUwLYaY4vBOuBAuEhmpDLii4rj/uO0yMA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2150,17 +2223,17 @@ } }, "node_modules/@codemirror/state": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", - "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" + "version": "6.3.3", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.3.3.tgz", + "integrity": "sha512-0wufKcTw2dEwEaADajjHf6hBy1sh3M6V0e+q4JKIhLuiMSe5td5HOWpUdvKth1fT1M9VYOboajoBHpkCd7PG7A==" }, "node_modules/@codemirror/view": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", - "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.22.1.tgz", + "integrity": "sha512-38BRn1nPqZqiHbmWfI8zri23IbRVbmSpSmh1E/Ysvc+lIGGdBC17K8zlK7ZU6fhfy9x4De9Zyj5JQqScPq5DkA==", "dependencies": { "@codemirror/state": "^6.1.4", - "style-mod": "^4.0.0", + "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, @@ -2485,33 +2558,33 @@ } }, 
"node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", - "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.5.1.tgz", + "integrity": "sha512-GkWzv+L6d2bI5f/Vk6ikJ9xtl7dfXtoRu3YGE6nq0p/FFqA1ebMOAWg3XgRyb0I6LYyYkiAo+3/KrwuBp8xG7A==", "hasInstallScript": true, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/fontawesome-svg-core": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", - "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.5.1.tgz", + "integrity": "sha512-MfRCYlQPXoLlpem+egxjfkEuP9UQswTrlCOsknus/NcMoblTH2g0jPrapbcIb04KGA7E2GZxbAccGZfWoYgsrQ==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" }, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/free-solid-svg-icons": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", - "integrity": "sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.5.1.tgz", + "integrity": "sha512-S1PPfU3mIJa59biTtXJz1oI0+KAXW6bkAb31XKhxdxtuXDiUIFsih4JR1v5BbxY7hVHsD1RKq+jRkVRaf773NQ==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" }, "engines": { "node": ">=6" @@ -3510,35 +3583,35 @@ "dev": true }, "node_modules/@lezer/common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", - "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.1.1.tgz", + "integrity": "sha512-aAPB9YbvZHqAW+bIwiuuTDGB4DG0sYNRObGLxud8cW7osw1ZQxfDuTZ8KQiqfZ0QJGcR34CvpTMDXEyo/+Htgg==" }, "node_modules/@lezer/generator": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", - "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.5.1.tgz", + "integrity": "sha512-vodJv2JPwsFsiBBHE463yBhvUI9TmhIu5duF/8MH304xNS6FyWH/vTyG61pjhERm5f+VBP94co0eiN+afWcvXw==", "dev": true, "dependencies": { "@lezer/common": "^1.0.2", "@lezer/lr": "^1.3.0" }, "bin": { - "lezer-generator": "dist/lezer-generator.cjs" + "lezer-generator": "src/lezer-generator.cjs" } }, "node_modules/@lezer/highlight": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", - "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/lr": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", - "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.14.tgz", + "integrity": "sha512-z5mY4LStlA3yL7aHT/rqgG614cfcvklS+8oFRFBYrs4YaWLJyKKM4+nN6KopToX0o9Hj6zmH6M5kinOYuy06ug==", "dependencies": { "@lezer/common": "^1.0.0" } @@ -4209,9 +4282,9 @@ } }, "node_modules/@types/enzyme": { - "version": "3.10.13", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", - "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", + "version": "3.10.18", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.18.tgz", + "integrity": "sha512-RaO/TyyHZvXkpzinbMTZmd/S5biU4zxkvDsn22ujC29t9FMSzq8tnn8f2MxQ2P8GVhFRG5jTAL05DXKyTtpEQQ==", "dev": true, "dependencies": { "@types/cheerio": "*", @@ -4279,9 +4352,9 @@ } }, "node_modules/@types/flot": { - "version": "0.0.32", - "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.32.tgz", - "integrity": "sha512-aturel4TWMY86N4Pkpc9pSoUd/p8c3BjGj4fTDkaZIpkRPzLH1VXZCAKGUywcFkTqgZMhPJFPWxd4pl87y8h/w==", + "version": "0.0.36", + "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.36.tgz", + "integrity": "sha512-xRo4MUIMnRPGXJCuQXAWvo+uKRmziRGHAy9LQHsLgbKanknpe5z3EThqVuYkVCC6ZWPZ/8pllBXnzQmGzFkJ/Q==", "dev": true, "dependencies": { "@types/jquery": "*" @@ -4341,9 +4414,9 @@ } }, "node_modules/@types/jest": { - "version": "29.5.2", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", - "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", + "version": "29.5.11", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.11.tgz", + "integrity": "sha512-S2mHmYIVe13vrm6q4kN6fLYYAka15ALQki/vgDC3mIukEOx8WJlv0kQPM+d4w8Gp6u0uSdKND04IlTXBv0rwnQ==", "dev": true, "dependencies": { "expect": "^29.0.0", @@ -4383,9 +4456,9 @@ "dev": true }, "node_modules/@types/jquery": { - "version": "3.5.16", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.16.tgz", - "integrity": "sha512-bsI7y4ZgeMkmpG9OM710RRzDFp+w4P1RGiIt30C1mSBT+ExCleeh4HObwgArnDFELmRrOpXgSYN9VF1hj+f1lw==", + "version": "3.5.29", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.29.tgz", + "integrity": "sha512-oXQQC9X9MOPRrMhPHHOsXqeQDnWeCDT3PelUIg/Oy8FAbzSZtFHRjc7IpbfFVmpLtJ+UOoywpRsuO5Jxjybyeg==", "dev": true, "dependencies": { "@types/sizzle": "*" @@ -4410,9 +4483,12 @@ "devOptional": true }, "node_modules/@types/node": { - "version": "17.0.45", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", - "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + "version": "20.10.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.4.tgz", + "integrity": "sha512-D08YG6rr8X90YB56tSIuBaddy/UXAA9RKJoFvrsnogAum/0pmjkgi4+2nx96A330FmioegBWmEYQ+syqCFaveg==", + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/parse-json": { "version": "4.0.0", @@ -4451,9 +4527,9 @@ "devOptional": true }, 
"node_modules/@types/react": { - "version": "17.0.53", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.53.tgz", - "integrity": "sha512-1yIpQR2zdYu1Z/dc1OxC+MA6GR240u3gcnP4l6mvj/PJiVaqHsQPmWttsvHsfnhfPbU2FuGmo0wSITPygjBmsw==", + "version": "17.0.71", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.71.tgz", + "integrity": "sha512-lfqOu9mp16nmaGRrS8deS2Taqhd5Ih0o92Te5Ws6I1py4ytHBcXLqh0YIqVsViqwVI5f+haiFM6hju814BzcmA==", "dev": true, "dependencies": { "@types/prop-types": "*", @@ -4462,18 +4538,18 @@ } }, "node_modules/@types/react-copy-to-clipboard": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.4.tgz", - "integrity": "sha512-otTJsJpofYAeaIeOwV5xBUGpo6exXG2HX7X4nseToCB2VgPEBxGBHCm/FecZ676doNR7HCSTVtmohxfG2b3/yQ==", + "version": "5.0.7", + "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.7.tgz", + "integrity": "sha512-Gft19D+as4M+9Whq1oglhmK49vqPhcLzk8WfvfLvaYMIPYanyfLy0+CwFucMJfdKoSFyySPmkkWn8/E6voQXjQ==", "dev": true, "dependencies": { "@types/react": "*" } }, "node_modules/@types/react-dom": { - "version": "17.0.20", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", - "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", + "version": "17.0.25", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.25.tgz", + "integrity": "sha512-urx7A7UxkZQmThYA4So0NelOVjx3V4rNFVJwp0WZlbIK5eM4rNJDiN3R/E9ix0MBh6kAEojk/9YL+Te6D9zHNA==", "dev": true, "dependencies": { "@types/react": "^17" @@ -4525,9 +4601,9 @@ "dev": true }, "node_modules/@types/sanitize-html": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", - "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", + "version": "2.9.5", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.5.tgz", + "integrity": "sha512-2Sr1vd8Dw+ypsg/oDDfZ57OMSG2Befs+l2CMyCC5bVSK3CpE7lTB2aNlbbWzazgVA+Qqfuholwom6x/mWd1qmw==", "dev": true, "dependencies": { "htmlparser2": "^8.0.0" @@ -4578,9 +4654,9 @@ } }, "node_modules/@types/sinon": { - "version": "10.0.15", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", - "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", + "version": "10.0.20", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.20.tgz", + "integrity": "sha512-2APKKruFNCAZgx3daAyACGzWuJ028VVCUDk6o2rw/Z4PXT0ogwdV4KUegW0MwVs0Zu59auPXbbuBJHF12Sx1Eg==", "dev": true, "dependencies": { "@types/sinonjs__fake-timers": "*" @@ -7562,9 +7638,9 @@ "dev": true }, "node_modules/downshift": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", - "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.2.tgz", + "integrity": "sha512-iOv+E1Hyt3JDdL9yYcOgW7nZ7GQ2Uz6YbggwXvKUSleetYhU2nXD482Rz6CzvM4lvI1At34BYruKAL4swRGxaA==", "dependencies": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -7989,9 +8065,9 @@ } }, "node_modules/eslint-config-prettier": { - "version": "8.8.0", - "resolved": 
"https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", - "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz", + "integrity": "sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==", "dev": true, "bin": { "eslint-config-prettier": "bin/cli.js" @@ -9244,9 +9320,9 @@ "dev": true }, "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "hasInstallScript": true, "optional": true, "os": [ @@ -10555,9 +10631,9 @@ } }, "node_modules/jest-canvas-mock": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", - "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.2.tgz", + "integrity": "sha512-vgnpPupjOL6+L5oJXzxTxFrlGEIbHdZqFU+LFNdtLxZ3lRDCl17FlTMM7IatoRQkrcyOTMlDinjUguqmQ6bR2A==", "dev": true, "dependencies": { "cssfontparser": "^1.2.1", @@ -12841,9 +12917,9 @@ } }, "node_modules/jquery": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", - "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz", + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==" }, "node_modules/jquery.flot.tooltip": { "version": "0.9.0", @@ -13646,14 +13722,13 @@ } }, "node_modules/nock": { - "version": "13.3.1", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", - "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", + "version": "13.4.0", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.4.0.tgz", + "integrity": "sha512-W8NVHjO/LCTNA64yxAPHV/K47LpGYcVzgKd3Q0n6owhwvD0Dgoterc25R4rnZbckJEb6Loxz1f5QMuJpJnbSyQ==", "dev": true, "dependencies": { "debug": "^4.1.0", "json-stringify-safe": "^5.0.1", - "lodash": "^4.17.21", "propagate": "^2.0.0" }, "engines": { @@ -18065,9 +18140,9 @@ "dev": true }, "node_modules/sass": { - "version": "1.62.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", - "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", + "version": "1.69.5", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.69.5.tgz", + "integrity": "sha512-qg2+UCJibLr2LCVOt3OlPhr/dqVHWOa9XtZf2OjbLs/T4VPSJ00udtgJxH3neXZm+QqX8B+3cU7RaLqp1iVfcQ==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -18796,9 +18871,9 @@ } }, "node_modules/style-mod": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.0.tgz", - "integrity": "sha512-OPhtyEjyyN9x3nhPsu76f52yUGXiZcgvsrFVtvTkyGRQJ0XK+GPc6ov1z+lRpbeabka+MYEQxOYRnt5nF30aMw==" 
+ "version": "4.1.0", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==" }, "node_modules/stylehacks": { "version": "5.1.0", @@ -19384,9 +19459,9 @@ "dev": true }, "node_modules/ts-jest": { - "version": "29.1.0", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", - "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", + "version": "29.1.1", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", + "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -19395,7 +19470,7 @@ "json5": "^2.2.3", "lodash.memoize": "4.x", "make-error": "1.x", - "semver": "7.x", + "semver": "^7.5.3", "yargs-parser": "^21.0.1" }, "bin": { @@ -19605,6 +19680,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "node_modules/unicode-canonical-property-names-ecmascript": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", @@ -20764,30 +20844,30 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.48.0-rc.2", + "version": "0.48.0", "dependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/commands": "^6.2.4", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/search": "^6.5.0", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.4.0", - "@fortawesome/free-solid-svg-icons": "6.4.0", + "@fortawesome/fontawesome-svg-core": "6.5.1", + "@fortawesome/free-solid-svg-icons": "6.5.1", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.2", + "@prometheus-io/codemirror-promql": "0.48.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.6.0", + "downshift": "^7.6.2", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.0", + "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -20801,21 +20881,21 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.11.0", - "sass": "1.62.1", + "sass": "1.69.5", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.13", - "@types/flot": "0.0.32", - "@types/jquery": "^3.5.16", - "@types/react": "^17.0.60", - "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.20", + "@types/enzyme": "^3.10.18", + 
"@types/flot": "0.0.36", + "@types/jquery": "^3.5.29", + "@types/react": "^17.0.71", + "@types/react-copy-to-clipboard": "^5.0.7", + "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.0", - "@types/sinon": "^10.0.15", + "@types/sanitize-html": "^2.9.5", + "@types/sinon": "^10.0.20", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -20823,18 +20903,7 @@ "sinon": "^14.0.2" }, "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "react-app/node_modules/@types/react": { - "version": "17.0.60", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", - "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", - "dev": true, - "dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" + "fsevents": "^2.3.3" } } }, @@ -20850,12 +20919,71 @@ } }, "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "requires": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", 
+ "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } } }, "@babel/compat-data": { @@ -20907,13 +21035,14 @@ } }, "@babel/generator": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz", - "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.3.tgz", + "integrity": "sha512-keeZWAV4LU3tW0qRi19HRpabC/ilM0HRBBzf9/k8FFiG4KVpiv0FIy4hHfLfFQZNhziCTPTmd59zoyv6DNISzg==", "dev": true, "requires": { - "@babel/types": "^7.19.3", + "@babel/types": "^7.23.3", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "dependencies": { @@ -21001,9 +21130,9 @@ } }, "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true }, "@babel/helper-explode-assignable-expression": { @@ -21016,22 +21145,22 @@ } }, "@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "requires": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" } }, "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-member-expression-to-functions": { @@ -21127,24 +21256,24 @@ } }, "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-string-parser": { - "version": "7.18.10", 
- "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "dev": true }, "@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "dev": true }, "@babel/helper-validator-option": { @@ -21177,13 +21306,13 @@ } }, "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dev": true, "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "dependencies": { @@ -21246,9 +21375,9 @@ } }, "@babel/parser": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.3.tgz", - "integrity": "sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.3.tgz", + "integrity": "sha512-uVsWNvlVsIninV2prNz/3lHCb+5CJ+e+IUBfbjToAHODtfGYLfCFuY4AU7TskI+dAKk+njsPiBjq1gKTvZOBaw==", "dev": true }, "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { @@ -22182,30 +22311,30 @@ } }, "@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" } }, "@babel/traverse": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.3.tgz", - "integrity": "sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.3.tgz", + "integrity": "sha512-+K0yF1/9yR0oHdE0StHuEj3uTPzwwbrLGfNOndVJVV2TqA5+j3oljJUb4nmB954FLGjNem976+B+eDuLIjesiQ==", "dev": 
true, "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.3", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.3", - "@babel/types": "^7.19.3", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.3", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.3", + "@babel/types": "^7.23.3", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -22219,13 +22348,13 @@ } }, "@babel/types": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.3.tgz", - "integrity": "sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.3.tgz", + "integrity": "sha512-OZnvoH2l8PK5eUvEcUyCt/sXgr/h+UWpVuBbOljwcrAgUl6lpchoQ++PHGyQy1AtYnVA6CEq3y5xeEI10brpXw==", "dev": true, "requires": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" } }, @@ -22236,44 +22365,44 @@ "dev": true }, "@codemirror/autocomplete": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", - "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", + "version": "6.11.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.11.1.tgz", + "integrity": "sha512-L5UInv8Ffd6BPw0P3EF7JLYAMeEbclY7+6Q11REt8vhih8RuLreKtPy/xk8wPxs4EQgYqzI7cdgpiYwWlbS/ow==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.6.0", + "@codemirror/view": "^6.17.0", "@lezer/common": "^1.0.0" } }, "@codemirror/commands": { - "version": "6.2.4", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", - "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", + "version": "6.3.2", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.2.tgz", + "integrity": "sha512-tjoi4MCWDNxgIpoLZ7+tezdS9OEB6pkiDKhfKx9ReJ/XBcs2G2RXIu+/FxXBlWsPTsz6C9q/r4gjzrsxpcnqCQ==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0" + "@lezer/common": "^1.1.0" } }, "@codemirror/language": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", - "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", + "version": "6.9.3", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.9.3.tgz", + "integrity": "sha512-qq48pYzoi6ldYWV/52+Z9Ou6QouVI+8YwvxFbUypI33NbjG2UeRHKENRyhwljTTiOqjQ33FjyZj6EREQ9apAOQ==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0", + "@lezer/common": "^1.1.0", "@lezer/highlight": "^1.0.0", "@lezer/lr": "^1.0.0", "style-mod": "^4.0.0" } }, "@codemirror/lint": { - "version": "6.2.2", - "resolved": 
"https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", - "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.4.2.tgz", + "integrity": "sha512-wzRkluWb1ptPKdzlsrbwwjYCPLgzU6N88YBAmlZi8WFyuiEduSd05MnJYNogzyc8rPK7pj6m95ptUApc8sHKVA==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22281,9 +22410,9 @@ } }, "@codemirror/search": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", - "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.5.tgz", + "integrity": "sha512-PIEN3Ke1buPod2EHbJsoQwlbpkz30qGZKcnmH1eihq9+bPQx8gelauUwLYaY4vBOuBAuEhmpDLii4rj/uO0yMA==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22291,17 +22420,17 @@ } }, "@codemirror/state": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", - "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" + "version": "6.3.3", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.3.3.tgz", + "integrity": "sha512-0wufKcTw2dEwEaADajjHf6hBy1sh3M6V0e+q4JKIhLuiMSe5td5HOWpUdvKth1fT1M9VYOboajoBHpkCd7PG7A==" }, "@codemirror/view": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", - "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.22.1.tgz", + "integrity": "sha512-38BRn1nPqZqiHbmWfI8zri23IbRVbmSpSmh1E/Ysvc+lIGGdBC17K8zlK7ZU6fhfy9x4De9Zyj5JQqScPq5DkA==", "requires": { "@codemirror/state": "^6.1.4", - "style-mod": "^4.0.0", + "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, @@ -22475,24 +22604,24 @@ } }, "@fortawesome/fontawesome-common-types": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", - "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==" + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.5.1.tgz", + "integrity": "sha512-GkWzv+L6d2bI5f/Vk6ikJ9xtl7dfXtoRu3YGE6nq0p/FFqA1ebMOAWg3XgRyb0I6LYyYkiAo+3/KrwuBp8xG7A==" }, "@fortawesome/fontawesome-svg-core": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", - "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.5.1.tgz", + "integrity": "sha512-MfRCYlQPXoLlpem+egxjfkEuP9UQswTrlCOsknus/NcMoblTH2g0jPrapbcIb04KGA7E2GZxbAccGZfWoYgsrQ==", "requires": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" } }, "@fortawesome/free-solid-svg-icons": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", - "integrity": 
"sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.5.1.tgz", + "integrity": "sha512-S1PPfU3mIJa59biTtXJz1oI0+KAXW6bkAb31XKhxdxtuXDiUIFsih4JR1v5BbxY7hVHsD1RKq+jRkVRaf773NQ==", "requires": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" } }, "@fortawesome/react-fontawesome": { @@ -23282,14 +23411,14 @@ "dev": true }, "@lezer/common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", - "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.1.1.tgz", + "integrity": "sha512-aAPB9YbvZHqAW+bIwiuuTDGB4DG0sYNRObGLxud8cW7osw1ZQxfDuTZ8KQiqfZ0QJGcR34CvpTMDXEyo/+Htgg==" }, "@lezer/generator": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", - "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.5.1.tgz", + "integrity": "sha512-vodJv2JPwsFsiBBHE463yBhvUI9TmhIu5duF/8MH304xNS6FyWH/vTyG61pjhERm5f+VBP94co0eiN+afWcvXw==", "dev": true, "requires": { "@lezer/common": "^1.0.2", @@ -23297,17 +23426,17 @@ } }, "@lezer/highlight": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", - "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", "requires": { "@lezer/common": "^1.0.0" } }, "@lezer/lr": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", - "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.14.tgz", + "integrity": "sha512-z5mY4LStlA3yL7aHT/rqgG614cfcvklS+8oFRFBYrs4YaWLJyKKM4+nN6KopToX0o9Hj6zmH6M5kinOYuy06ug==", "requires": { "@lezer/common": "^1.0.0" } @@ -23406,42 +23535,42 @@ "@prometheus-io/app": { "version": "file:react-app", "requires": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/commands": "^6.2.4", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/search": "^6.5.0", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.4.0", - "@fortawesome/free-solid-svg-icons": "6.4.0", + "@fortawesome/fontawesome-svg-core": "6.5.1", + "@fortawesome/free-solid-svg-icons": "6.5.1", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", 
"@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.2", + "@prometheus-io/codemirror-promql": "0.48.0", "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.13", - "@types/flot": "0.0.32", - "@types/jquery": "^3.5.16", - "@types/react": "^17.0.60", - "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.20", + "@types/enzyme": "^3.10.18", + "@types/flot": "0.0.36", + "@types/jquery": "^3.5.29", + "@types/react": "^17.0.71", + "@types/react-copy-to-clipboard": "^5.0.7", + "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.0", - "@types/sinon": "^10.0.15", + "@types/sanitize-html": "^2.9.5", + "@types/sinon": "^10.0.20", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.6.0", + "downshift": "^7.6.2", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", - "fsevents": "^2.3.2", + "fsevents": "^2.3.3", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.0", + "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -23456,40 +23585,27 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.11.0", - "sass": "1.62.1", + "sass": "1.69.5", "sinon": "^14.0.2", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" - }, - "dependencies": { - "@types/react": { - "version": "17.0.60", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", - "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", - "dev": true, - "requires": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - } } }, "@prometheus-io/codemirror-promql": { "version": "file:module/codemirror-promql", "requires": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", - "@prometheus-io/lezer-promql": "0.48.0-rc.2", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", + "@prometheus-io/lezer-promql": "0.48.0", "isomorphic-fetch": "^3.0.0", "lru-cache": "^7.18.3", - "nock": "^13.3.1" + "nock": "^13.4.0" }, "dependencies": { "lru-cache": { @@ -23502,9 +23618,9 @@ "@prometheus-io/lezer-promql": { "version": "file:module/lezer-promql", "requires": { - "@lezer/generator": "^1.2.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6" + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14" } }, "@rollup/plugin-babel": { @@ -23875,9 +23991,9 @@ } }, "@types/enzyme": { - "version": "3.10.13", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", - "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", + "version": "3.10.18", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.18.tgz", + "integrity": "sha512-RaO/TyyHZvXkpzinbMTZmd/S5biU4zxkvDsn22ujC29t9FMSzq8tnn8f2MxQ2P8GVhFRG5jTAL05DXKyTtpEQQ==", "dev": true, "requires": { "@types/cheerio": "*", @@ -23947,9 +24063,9 @@ } }, "@types/flot": { - "version": 
"0.0.32", - "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.32.tgz", - "integrity": "sha512-aturel4TWMY86N4Pkpc9pSoUd/p8c3BjGj4fTDkaZIpkRPzLH1VXZCAKGUywcFkTqgZMhPJFPWxd4pl87y8h/w==", + "version": "0.0.36", + "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.36.tgz", + "integrity": "sha512-xRo4MUIMnRPGXJCuQXAWvo+uKRmziRGHAy9LQHsLgbKanknpe5z3EThqVuYkVCC6ZWPZ/8pllBXnzQmGzFkJ/Q==", "dev": true, "requires": { "@types/jquery": "*" @@ -24009,9 +24125,9 @@ } }, "@types/jest": { - "version": "29.5.2", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", - "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", + "version": "29.5.11", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.11.tgz", + "integrity": "sha512-S2mHmYIVe13vrm6q4kN6fLYYAka15ALQki/vgDC3mIukEOx8WJlv0kQPM+d4w8Gp6u0uSdKND04IlTXBv0rwnQ==", "dev": true, "requires": { "expect": "^29.0.0", @@ -24044,9 +24160,9 @@ } }, "@types/jquery": { - "version": "3.5.16", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.16.tgz", - "integrity": "sha512-bsI7y4ZgeMkmpG9OM710RRzDFp+w4P1RGiIt30C1mSBT+ExCleeh4HObwgArnDFELmRrOpXgSYN9VF1hj+f1lw==", + "version": "3.5.29", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.29.tgz", + "integrity": "sha512-oXQQC9X9MOPRrMhPHHOsXqeQDnWeCDT3PelUIg/Oy8FAbzSZtFHRjc7IpbfFVmpLtJ+UOoywpRsuO5Jxjybyeg==", "dev": true, "requires": { "@types/sizzle": "*" @@ -24071,9 +24187,12 @@ "devOptional": true }, "@types/node": { - "version": "17.0.45", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", - "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + "version": "20.10.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.4.tgz", + "integrity": "sha512-D08YG6rr8X90YB56tSIuBaddy/UXAA9RKJoFvrsnogAum/0pmjkgi4+2nx96A330FmioegBWmEYQ+syqCFaveg==", + "requires": { + "undici-types": "~5.26.4" + } }, "@types/parse-json": { "version": "4.0.0", @@ -24112,9 +24231,9 @@ "devOptional": true }, "@types/react": { - "version": "17.0.53", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.53.tgz", - "integrity": "sha512-1yIpQR2zdYu1Z/dc1OxC+MA6GR240u3gcnP4l6mvj/PJiVaqHsQPmWttsvHsfnhfPbU2FuGmo0wSITPygjBmsw==", + "version": "17.0.71", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.71.tgz", + "integrity": "sha512-lfqOu9mp16nmaGRrS8deS2Taqhd5Ih0o92Te5Ws6I1py4ytHBcXLqh0YIqVsViqwVI5f+haiFM6hju814BzcmA==", "dev": true, "requires": { "@types/prop-types": "*", @@ -24123,18 +24242,18 @@ } }, "@types/react-copy-to-clipboard": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.4.tgz", - "integrity": "sha512-otTJsJpofYAeaIeOwV5xBUGpo6exXG2HX7X4nseToCB2VgPEBxGBHCm/FecZ676doNR7HCSTVtmohxfG2b3/yQ==", + "version": "5.0.7", + "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.7.tgz", + "integrity": "sha512-Gft19D+as4M+9Whq1oglhmK49vqPhcLzk8WfvfLvaYMIPYanyfLy0+CwFucMJfdKoSFyySPmkkWn8/E6voQXjQ==", "dev": true, "requires": { "@types/react": "*" } }, "@types/react-dom": { - "version": "17.0.20", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", - "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", + "version": "17.0.25", + "resolved": 
"https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.25.tgz", + "integrity": "sha512-urx7A7UxkZQmThYA4So0NelOVjx3V4rNFVJwp0WZlbIK5eM4rNJDiN3R/E9ix0MBh6kAEojk/9YL+Te6D9zHNA==", "dev": true, "requires": { "@types/react": "^17" @@ -24186,9 +24305,9 @@ "dev": true }, "@types/sanitize-html": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", - "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", + "version": "2.9.5", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.5.tgz", + "integrity": "sha512-2Sr1vd8Dw+ypsg/oDDfZ57OMSG2Befs+l2CMyCC5bVSK3CpE7lTB2aNlbbWzazgVA+Qqfuholwom6x/mWd1qmw==", "dev": true, "requires": { "htmlparser2": "^8.0.0" @@ -24234,9 +24353,9 @@ } }, "@types/sinon": { - "version": "10.0.15", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", - "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", + "version": "10.0.20", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.20.tgz", + "integrity": "sha512-2APKKruFNCAZgx3daAyACGzWuJ028VVCUDk6o2rw/Z4PXT0ogwdV4KUegW0MwVs0Zu59auPXbbuBJHF12Sx1Eg==", "dev": true, "requires": { "@types/sinonjs__fake-timers": "*" @@ -26472,9 +26591,9 @@ "dev": true }, "downshift": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", - "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.2.tgz", + "integrity": "sha512-iOv+E1Hyt3JDdL9yYcOgW7nZ7GQ2Uz6YbggwXvKUSleetYhU2nXD482Rz6CzvM4lvI1At34BYruKAL4swRGxaA==", "requires": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -26810,9 +26929,9 @@ } }, "eslint-config-prettier": { - "version": "8.8.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", - "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz", + "integrity": "sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==", "dev": true, "requires": {} }, @@ -27735,9 +27854,9 @@ "dev": true }, "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "optional": true }, "function-bind": { @@ -28681,9 +28800,9 @@ } }, "jest-canvas-mock": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", - "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.2.tgz", + "integrity": "sha512-vgnpPupjOL6+L5oJXzxTxFrlGEIbHdZqFU+LFNdtLxZ3lRDCl17FlTMM7IatoRQkrcyOTMlDinjUguqmQ6bR2A==", "dev": true, "requires": { "cssfontparser": "^1.2.1", @@ -30529,9 
+30648,9 @@ } }, "jquery": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", - "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz", + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==" }, "jquery.flot.tooltip": { "version": "0.9.0", @@ -31178,14 +31297,13 @@ } }, "nock": { - "version": "13.3.1", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", - "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", + "version": "13.4.0", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.4.0.tgz", + "integrity": "sha512-W8NVHjO/LCTNA64yxAPHV/K47LpGYcVzgKd3Q0n6owhwvD0Dgoterc25R4rnZbckJEb6Loxz1f5QMuJpJnbSyQ==", "dev": true, "requires": { "debug": "^4.1.0", "json-stringify-safe": "^5.0.1", - "lodash": "^4.17.21", "propagate": "^2.0.0" } }, @@ -34329,9 +34447,9 @@ "dev": true }, "sass": { - "version": "1.62.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", - "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", + "version": "1.69.5", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.69.5.tgz", + "integrity": "sha512-qg2+UCJibLr2LCVOt3OlPhr/dqVHWOa9XtZf2OjbLs/T4VPSJ00udtgJxH3neXZm+QqX8B+3cU7RaLqp1iVfcQ==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -34896,9 +35014,9 @@ "requires": {} }, "style-mod": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.0.tgz", - "integrity": "sha512-OPhtyEjyyN9x3nhPsu76f52yUGXiZcgvsrFVtvTkyGRQJ0XK+GPc6ov1z+lRpbeabka+MYEQxOYRnt5nF30aMw==" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==" }, "stylehacks": { "version": "5.1.0", @@ -35344,9 +35462,9 @@ "dev": true }, "ts-jest": { - "version": "29.1.0", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", - "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", + "version": "29.1.1", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", + "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", "dev": true, "requires": { "bs-logger": "0.x", @@ -35355,7 +35473,7 @@ "json5": "^2.2.3", "lodash.memoize": "4.x", "make-error": "1.x", - "semver": "7.x", + "semver": "^7.5.3", "yargs-parser": "^21.0.1" }, "dependencies": { @@ -35498,6 +35616,11 @@ "which-boxed-primitive": "^1.0.2" } }, + "undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "unicode-canonical-property-names-ecmascript": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", diff --git a/web/ui/package.json b/web/ui/package.json index 9b4dfbc628..7212ccacba 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -16,17 +16,17 @@ "npm": ">=7.0.0" }, "devDependencies": { - "@types/jest": "^29.5.2", 
- "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.8.0", + "@types/jest": "^29.5.11", + "@types/node": "^20.10.4", + "eslint-config-prettier": "^8.10.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.5.1", + "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.1.0", + "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.48.0-rc.2" + "version": "0.48.0" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c14dbb4b0f..46deeefc0b 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,30 +1,30 @@ { "name": "@prometheus-io/app", - "version": "0.48.0-rc.2", + "version": "0.48.0", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/commands": "^6.2.4", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/search": "^6.5.0", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.4.0", - "@fortawesome/free-solid-svg-icons": "6.4.0", + "@fortawesome/fontawesome-svg-core": "6.5.1", + "@fortawesome/free-solid-svg-icons": "6.5.1", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.48.0-rc.2", + "@prometheus-io/codemirror-promql": "0.48.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.6.0", + "downshift": "^7.6.2", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.0", + "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -38,7 +38,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.11.0", - "sass": "1.62.1", + "sass": "1.69.5", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, @@ -66,15 +66,15 @@ ], "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.13", - "@types/flot": "0.0.32", - "@types/jquery": "^3.5.16", - "@types/react": "^17.0.60", - "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.20", + "@types/enzyme": "^3.10.18", + "@types/flot": "0.0.36", + "@types/jquery": "^3.5.29", + "@types/react": "^17.0.71", + "@types/react-copy-to-clipboard": "^5.0.7", + "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.0", - "@types/sinon": "^10.0.15", + "@types/sanitize-html": "^2.9.5", + "@types/sinon": "^10.0.20", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -94,6 +94,6 @@ } }, "optionalDependencies": { - "fsevents": "^2.3.2" + "fsevents": "^2.3.3" } }