diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 85109b39a..0f3c5d277 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1 + - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index c2c9dc070..f6d5c9191 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1 + - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6dcaefadd..8ba154e25 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -151,7 +151,8 @@ jobs: uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 with: args: --verbose - version: v1.54.2 + # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. + version: v1.55.2 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 19f8cfb55..5e14936a9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,12 +30,12 @@ jobs: go-version: 1.21.x - name: Initialize CodeQL - uses: github/codeql-action/init@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 + uses: github/codeql-action/init@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 + uses: github/codeql-action/autobuild@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5 + uses: github/codeql-action/analyze@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8 diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 3f557a089..e7e813e3b 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'prometheus' steps: - - uses: dessant/lock-threads@be8aa5be94131386884a6da4189effda9b14aa21 # v4.0.1 + - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1 with: process-only: 'issues' issue-inactive-days: '180' diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 091b50ccd..f71e1331b 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@74483a38d39275f33fcff5f35b679b5ca4a26a99 # tag=v2.22.5 + uses: github/codeql-action/upload-sarif@407ffafae6a767df3e0230c3df91b6443ae8df75 # tag=v2.22.8 with: sarif_file: results.sarif diff --git a/.golangci.yml b/.golangci.yml index 158bb3579..166b2e0d4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,6 +23,7 @@ linters: - nolintlint - predeclared - revive + - testifylint - unconvert - unused @@ -35,13 +36,9 @@ issues: - path: _test.go linters: - errcheck - - path: tsdb/ + - path: "tsdb/head_wal.go" linters: - errorlint - - path: tsdb/ - text: "import 'github.com/pkg/errors' is not allowed" - linters: - - depguard - linters: - godot source: "^// ===" @@ -117,3 +114,19 @@ linters-settings: disabled: true - name: var-declaration - name: var-naming + testifylint: + disable: + - float-compare + - go-require + enable: + - bool-compare + - compares + - empty + - error-is-as + - error-nil + - expected-actual + - len + - require-error + - suite-dont-use-pkg + - suite-extra-assert-call + diff --git a/Makefile b/Makefile index 0dd8673af..ab229f931 100644 --- a/Makefile +++ b/Makefile @@ -93,9 +93,9 @@ endif # If we only want to only test go code we have to change the test target # which is called by all. ifeq ($(GO_ONLY),1) -test: common-test +test: common-test check-go-mod-version else -test: common-test ui-build-module ui-test ui-lint +test: common-test ui-build-module ui-test ui-lint check-go-mod-version endif .PHONY: npm_licenses @@ -138,3 +138,17 @@ bench_tsdb: $(PROMU) cli-documentation: $(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md $(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md + +.PHONY: check-go-mod-version +check-go-mod-version: + @echo ">> checking go.mod version matching" + @./scripts/check-go-mod-version.sh + +.PHONY: update-all-go-deps +update-all-go-deps: + @$(MAKE) update-go-deps + @echo ">> updating Go dependencies in ./documentation/examples/remote_storage/" + @cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get -d $$m; \ + done + @cd ./documentation/examples/remote_storage/ && $(GO) mod tidy diff --git a/Makefile.common b/Makefile.common index 062a28185..bc2a07d72 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.54.2 +GOLANGCI_LINT_VERSION ?= v1.55.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/RELEASE.md b/RELEASE.md index 04d106aee..6ab2f6389 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -109,14 +109,16 @@ This is also a good time to consider any experimental features and feature flags for promotion to stable or for deprecation or ultimately removal. Do any of these in pull requests, one per feature. +> NOTE: As a validation step check if all security alerts are closed after this step: https://github.com/prometheus/prometheus/security/dependabot. Sometimes it's ok +> if not critical and e.g. fix is not released yet (or it does not relate to +> upgrading) or when we are unaffected. 
+ #### Manually updating Go dependencies This is usually only needed for `+incompatible` and `v0.0.0` non-semver updates. ```bash -make update-go-deps -git add go.mod go.sum -git commit -m "Update dependencies" +make update-all-go-deps ``` #### Manually updating React dependencies diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index dfafe66c6..7e8e23444 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -206,9 +206,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "native-histograms": c.tsdb.EnableNativeHistograms = true // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. - config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols - config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols + config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols + config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "created-timestamp-zero-ingestion": + c.scrape.EnableCreatedTimestampZeroIngestion = true + // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. + config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols + config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols + level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. 
Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) case "": continue case "promql-at-modifier", "promql-negative-offset": @@ -1449,6 +1455,10 @@ func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, return 0, tsdb.ErrNotReady } +func (n notReadyAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + func (n notReadyAppender) Commit() error { return tsdb.ErrNotReady } func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady } @@ -1587,7 +1597,6 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { RetentionDuration: int64(time.Duration(opts.RetentionDuration) / time.Millisecond), MaxBytes: int64(opts.MaxBytes), NoLockfile: opts.NoLockfile, - AllowOverlappingCompaction: true, WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, SamplesPerChunk: opts.SamplesPerChunk, diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index e4f831939..f4fe3855c 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -346,7 +346,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames continue } - require.Equal(t, 1, len(g.GetMetric())) + require.Len(t, g.GetMetric(), 1) if _, ok := res[m]; ok { t.Error("expected only one metric family for", m) t.FailNow() diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index f20f2a22c..dd6b56672 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -284,7 +284,7 @@ func (p *queryLogTest) run(t *testing.T) { if !p.enabledAtStart { p.query(t) - require.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name()))) + require.Empty(t, readQueryLog(t, queryLogFile.Name())) p.setQueryLog(t, queryLogFile.Name()) p.reloadConfig(t) } @@ -309,7 +309,7 @@ func (p *queryLogTest) run(t *testing.T) { p.query(t) ql = readQueryLog(t, queryLogFile.Name()) - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) qc = len(ql) p.setQueryLog(t, queryLogFile.Name()) @@ -320,7 +320,7 @@ func (p *queryLogTest) run(t *testing.T) { ql = readQueryLog(t, queryLogFile.Name()) if p.exactQueryCount() { - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) } else { require.Greater(t, len(ql), qc, "no queries logged") } @@ -340,7 +340,7 @@ func (p *queryLogTest) run(t *testing.T) { require.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name())) ql = readQueryLog(t, newFile.Name()) if p.exactQueryCount() { - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) } p.validateLastQuery(t, ql) qc = len(ql) @@ -351,7 +351,7 @@ func (p *queryLogTest) run(t *testing.T) { ql = readQueryLog(t, newFile.Name()) if p.exactQueryCount() { - require.Equal(t, qc, len(ql)) + require.Len(t, ql, qc) } else { require.Greater(t, len(ql), qc, "no queries logged") } diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index f373ebd6e..7d29690e4 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -61,7 +61,7 @@ func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMa func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, expectedBlockDuration int64, expectedSamples []backfillSample, expectedNumBlocks int) { blocks := db.Blocks() - require.Equal(t, expectedNumBlocks, 
len(blocks), "did not create correct number of blocks") + require.Len(t, blocks, expectedNumBlocks, "did not create correct number of blocks") for i, block := range blocks { require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i) diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 09c91f92a..7306a3e64 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -56,7 +56,7 @@ func TestQueryRange(t *testing.T) { defer s.Close() urlObject, err := url.Parse(s.URL) - require.Equal(t, nil, err) + require.NoError(t, err) p := &promqlPrinter{} exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p) @@ -79,7 +79,7 @@ func TestQueryInstant(t *testing.T) { defer s.Close() urlObject, err := url.Parse(s.URL) - require.Equal(t, nil, err) + require.NoError(t, err) p := &promqlPrinter{} exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p) diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index 1c0698288..75aad6786 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -91,13 +91,13 @@ func TestBackfillRuleIntegration(t *testing.T) { for _, err := range errs { require.NoError(t, err) } - require.Equal(t, 3, len(ruleImporter.groups)) + require.Len(t, ruleImporter.groups, 3) group1 := ruleImporter.groups[path1+";group0"] require.NotNil(t, group1) const defaultInterval = 60 require.Equal(t, defaultInterval*time.Second, group1.Interval()) gRules := group1.Rules() - require.Equal(t, 1, len(gRules)) + require.Len(t, gRules, 1) require.Equal(t, "rule1", gRules[0].Name()) require.Equal(t, "ruleExpr", gRules[0].Query().String()) require.Equal(t, 1, gRules[0].Labels().Len()) @@ -106,7 +106,7 @@ func TestBackfillRuleIntegration(t *testing.T) { require.NotNil(t, group2) require.Equal(t, defaultInterval*time.Second, group2.Interval()) g2Rules := group2.Rules() - require.Equal(t, 2, len(g2Rules)) + require.Len(t, g2Rules, 2) require.Equal(t, "grp2_rule1", g2Rules[0].Name()) require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String()) require.Equal(t, 0, g2Rules[0].Labels().Len()) @@ -122,7 +122,7 @@ func TestBackfillRuleIntegration(t *testing.T) { require.NoError(t, err) blocks := db.Blocks() - require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks)) + require.Len(t, blocks, (i+1)*tt.expectedBlockCount) q, err := db.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go index 82bc9947f..2f4d3aba7 100644 --- a/cmd/promtool/sd_test.go +++ b/cmd/promtool/sd_test.go @@ -35,7 +35,7 @@ func TestSDCheckResult(t *testing.T) { }} reg, err := relabel.NewRegexp("(.*)") - require.Nil(t, err) + require.NoError(t, err) scrapeConfig := &config.ScrapeConfig{ ScrapeInterval: model.Duration(1 * time.Minute), diff --git a/config/config.go b/config/config.go index b832ac9a1..ddcca84dc 100644 --- a/config/config.go +++ b/config/config.go @@ -454,12 +454,19 @@ var ( OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } + // DefaultScrapeProtocols is the set of scrape protocols that will be proposed + // to scrape target, ordered by priority. 
DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, PrometheusText0_0_4, } - DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{ + + // DefaultProtoFirstScrapeProtocols is like DefaultScrapeProtocols, but it + // favors protobuf Prometheus exposition format. + // Used by default for certain feature-flags like + // "native-histograms" and "created-timestamp-zero-ingestion". + DefaultProtoFirstScrapeProtocols = []ScrapeProtocol{ PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, diff --git a/config/config_test.go b/config/config_test.go index 408622cd5..5d753a0f7 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1457,8 +1457,8 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { got := &Config{} require.NoError(t, yaml.UnmarshalStrict(out, got)) - require.Equal(t, true, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit) - require.Equal(t, false, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) + require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit) + require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) } func TestLoadConfig(t *testing.T) { @@ -1475,9 +1475,9 @@ func TestLoadConfig(t *testing.T) { func TestScrapeIntervalLarger(t *testing.T) { c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) require.NoError(t, err) - require.Equal(t, 1, len(c.ScrapeConfigs)) + require.Len(t, c.ScrapeConfigs, 1) for _, sc := range c.ScrapeConfigs { - require.Equal(t, true, sc.ScrapeInterval >= sc.ScrapeTimeout) + require.GreaterOrEqual(t, sc.ScrapeInterval, sc.ScrapeTimeout) } } @@ -1493,7 +1493,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Equal(t, 22, len(matches), "wrong number of secret matches found") + require.Len(t, matches, 22, "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } @@ -2063,7 +2063,7 @@ func TestAgentMode(t *testing.T) { c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) require.NoError(t, err) - require.Len(t, c.RemoteWriteConfigs, 0) + require.Empty(t, c.RemoteWriteConfigs) c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) require.NoError(t, err) @@ -2257,5 +2257,5 @@ func TestScrapeConfigDisableCompression(t *testing.T) { got := &Config{} require.NoError(t, yaml.UnmarshalStrict(out, got)) - require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression) + require.False(t, got.ScrapeConfigs[0].EnableCompression) } diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index faccadcf8..ef953b802 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -28,6 +28,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" @@ -638,7 +639,7 @@ func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkI return nil, fmt.Errorf("could not parse network interface ID: %w", err) } - resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, nil) + resp, err := 
client.nic.Get(ctx, r.ResourceGroupName, r.Name, &armnetwork.InterfacesClientGetOptions{Expand: to.Ptr("IPConfigurations/PublicIPAddress")}) if err != nil { var responseError *azcore.ResponseError if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound { diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 6c3ec236b..48f5b076c 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -269,7 +269,7 @@ func TestNewAzureResourceFromID(t *testing.T) { }, } { actual, err := newAzureResourceFromID(tc.id, nil) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, tc.expected.Name, actual.Name) require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName) } diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index c92960163..19f7d3c4a 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -268,13 +268,13 @@ func newDiscovery(t *testing.T, config *SDConfig) *Discovery { } func checkOneTarget(t *testing.T, tg []*targetgroup.Group) { - require.Equal(t, 1, len(tg)) + require.Len(t, tg, 1) target := tg[0] require.Equal(t, "test-dc", string(target.Labels["__meta_consul_dc"])) require.Equal(t, target.Source, string(target.Labels["__meta_consul_service"])) if target.Source == "test" { // test service should have one node. - require.Greater(t, len(target.Targets), 0, "Test service should have one node") + require.NotEmpty(t, target.Targets, "Test service should have one node") } } @@ -313,7 +313,7 @@ func TestNoTargets(t *testing.T) { }() targets := (<-ch)[0].Targets - require.Equal(t, 0, len(targets)) + require.Empty(t, targets) cancel() <-ch } @@ -484,7 +484,7 @@ oauth2: return } - require.Equal(t, config, test.expected) + require.Equal(t, test.expected, config) }) } } diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go index a5da4b26e..df2514ecb 100644 --- a/discovery/digitalocean/digitalocean_test.go +++ b/discovery/digitalocean/digitalocean_test.go @@ -56,12 +56,12 @@ func TestDigitalOceanSDRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/eureka/client_test.go b/discovery/eureka/client_test.go index f1451c3a9..83f6fd5ff 100644 --- a/discovery/eureka/client_test.go +++ b/discovery/eureka/client_test.go @@ -184,17 +184,17 @@ func TestFetchApps(t *testing.T) { apps, err := fetchApps(context.TODO(), ts.URL, &http.Client{}) require.NoError(t, err) - require.Equal(t, len(apps.Applications), 2) - require.Equal(t, apps.Applications[0].Name, "CONFIG-SERVICE") - require.Equal(t, apps.Applications[1].Name, "META-SERVICE") + require.Len(t, apps.Applications, 2) + require.Equal(t, "CONFIG-SERVICE", apps.Applications[0].Name) + require.Equal(t, "META-SERVICE", apps.Applications[1].Name) - require.Equal(t, len(apps.Applications[1].Instances), 2) - require.Equal(t, apps.Applications[1].Instances[0].InstanceID, "meta-service002.test.com:meta-service:8080") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local, "project") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[0].Content, "meta-service") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local, 
"management.port") - require.Equal(t, apps.Applications[1].Instances[0].Metadata.Items[1].Content, "8090") - require.Equal(t, apps.Applications[1].Instances[1].InstanceID, "meta-service001.test.com:meta-service:8080") + require.Len(t, apps.Applications[1].Instances, 2) + require.Equal(t, "meta-service002.test.com:meta-service:8080", apps.Applications[1].Instances[0].InstanceID) + require.Equal(t, "project", apps.Applications[1].Instances[0].Metadata.Items[0].XMLName.Local) + require.Equal(t, "meta-service", apps.Applications[1].Instances[0].Metadata.Items[0].Content) + require.Equal(t, "management.port", apps.Applications[1].Instances[0].Metadata.Items[1].XMLName.Local) + require.Equal(t, "8090", apps.Applications[1].Instances[0].Metadata.Items[1].Content) + require.Equal(t, "meta-service001.test.com:meta-service:8080", apps.Applications[1].Instances[1].InstanceID) } func Test500ErrorHttpResponse(t *testing.T) { diff --git a/discovery/eureka/eureka_test.go b/discovery/eureka/eureka_test.go index 0641aa7bf..cb75e1428 100644 --- a/discovery/eureka/eureka_test.go +++ b/discovery/eureka/eureka_test.go @@ -55,7 +55,7 @@ func TestEurekaSDHandleError(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestEurekaSDEmptyList(t *testing.T) { @@ -72,7 +72,7 @@ func TestEurekaSDEmptyList(t *testing.T) { ) tgs, err := testUpdateServices(respHandler) require.NoError(t, err) - require.Equal(t, len(tgs), 1) + require.Len(t, tgs, 1) } func TestEurekaSDSendGroup(t *testing.T) { @@ -232,11 +232,11 @@ func TestEurekaSDSendGroup(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.NoError(t, err) - require.Equal(t, len(tgs), 1) + require.Len(t, tgs, 1) tg := tgs[0] - require.Equal(t, tg.Source, "eureka") - require.Equal(t, len(tg.Targets), 4) + require.Equal(t, "eureka", tg.Source) + require.Len(t, tg.Targets, 4) tgt := tg.Targets[0] require.Equal(t, tgt[model.AddressLabel], model.LabelValue("config-service001.test.com:8080")) diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go index a4f19cfdd..10b799037 100644 --- a/discovery/hetzner/hcloud_test.go +++ b/discovery/hetzner/hcloud_test.go @@ -48,12 +48,12 @@ func TestHCloudSDRefresh(t *testing.T) { targetGroups, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup, "targetGroup should not be nil") require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") - require.Equal(t, 3, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 3) for i, labelSet := range []model.LabelSet{ { diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go index f78a0bbda..abee5fea9 100644 --- a/discovery/hetzner/robot_test.go +++ b/discovery/hetzner/robot_test.go @@ -47,12 +47,12 @@ func TestRobotSDRefresh(t *testing.T) { targetGroups, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup, "targetGroup should not be nil") require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil") - require.Equal(t, 2, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 2) for i, labelSet := range []model.LabelSet{ { @@ -98,5 +98,5 @@ func TestRobotSDRefreshHandleError(t 
*testing.T) { require.Error(t, err) require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error()) - require.Equal(t, 0, len(targetGroups)) + require.Empty(t, targetGroups) } diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go index a284e7f36..9bbda95b7 100644 --- a/discovery/http/http_test.go +++ b/discovery/http/http_test.go @@ -62,7 +62,7 @@ func TestHTTPValidRefresh(t *testing.T) { Source: urlSource(ts.URL+"/http_sd.good.json", 0), }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) require.Equal(t, 0.0, getFailureCount()) } diff --git a/discovery/ionos/server_test.go b/discovery/ionos/server_test.go index 92f2a96f9..30f358e32 100644 --- a/discovery/ionos/server_test.go +++ b/discovery/ionos/server_test.go @@ -48,12 +48,12 @@ func TestIONOSServerRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index e56491093..1ee333719 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -29,7 +29,7 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) require.Equal(t, endpointSlice.AddressType, v1.AddressType(adaptor.addressType())) require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, endpointSlice.Labels[v1.LabelServiceName], "testendpoints") + require.Equal(t, "testendpoints", endpointSlice.Labels[v1.LabelServiceName]) for i, endpointAdaptor := range adaptor.endpoints() { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) @@ -57,7 +57,7 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace()) require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType())) require.Equal(t, endpointSlice.Labels, adaptor.labels()) - require.Equal(t, endpointSlice.Labels[v1beta1.LabelServiceName], "testendpoints") + require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName]) for i, endpointAdaptor := range adaptor.endpoints() { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go index 988313b70..db4ee9bf8 100644 --- a/discovery/linode/linode_test.go +++ b/discovery/linode/linode_test.go @@ -61,12 +61,12 @@ func TestLinodeSDRefresh(t *testing.T) { tgs, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 537160811..796b01458 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -790,21 +790,21 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + 
require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { @@ -822,12 +822,12 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) c["prometheus2"] = c["prometheus"] delete(c, "prometheus") @@ -836,10 +836,10 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) { syncedTargets = <-discoveryManager.SyncCh() p = pk("static", "prometheus2", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) { @@ -860,24 +860,24 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi c["prometheus2"] = c["prometheus"] discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 2) delete(c, "prometheus") discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() p = pk("static", "prometheus2", 0) verifyPresence(t, discoveryManager.targets, p, 
"{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { @@ -895,9 +895,9 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets := <-discoveryManager.SyncCh() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) var mu sync.Mutex c["prometheus2"] = Configs{ @@ -912,33 +912,33 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { // Original targets should be present as soon as possible. syncedTargets = <-discoveryManager.SyncCh() mu.Unlock() - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) // prometheus2 configs should be ready on second sync. syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"bar:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) p = pk("lockstatic", "prometheus2", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 2) // Delete part of config and ensure only original targets exist. 
delete(c, "prometheus2") discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) verifyPresence(t, discoveryManager.targets, pk("static", "prometheus", 0), "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { @@ -959,25 +959,25 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) { p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) - require.Equal(t, 2, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 2) c["prometheus"] = Configs{ staticConfig("foo:9090"), } discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() - require.Equal(t, 1, len(discoveryManager.targets)) + require.Len(t, discoveryManager.targets, 1) p = pk("static", "prometheus", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", false) - require.Equal(t, 1, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 1) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) } func TestDiscovererConfigs(t *testing.T) { @@ -1001,12 +1001,12 @@ func TestDiscovererConfigs(t *testing.T) { verifyPresence(t, discoveryManager.targets, p, "{__address__=\"bar:9090\"}", true) p = pk("static", "prometheus", 1) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"baz:9090\"}", true) - require.Equal(t, 2, len(discoveryManager.targets)) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, discoveryManager.targets, 2) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"bar:9090\"}", true) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"baz:9090\"}", true) - require.Equal(t, 3, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 3) } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after @@ -1029,9 +1029,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { syncedTargets := <-discoveryManager.SyncCh() p := pk("static", "prometheus", 0) verifyPresence(t, discoveryManager.targets, p, "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets)) + require.Len(t, syncedTargets, 1) verifySyncedPresence(t, syncedTargets, "prometheus", 
"{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) c["prometheus"] = Configs{ StaticConfig{{}}, @@ -1052,8 +1052,8 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { if len(group.Targets) != 0 { t.Fatalf("Invalid number of targets: expected 0, got %d", len(group.Targets)) } - require.Equal(t, 1, len(syncedTargets)) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets, 1) + require.Len(t, syncedTargets["prometheus"], 1) if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil { t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls) } @@ -1082,11 +1082,11 @@ func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { if len(discoveryManager.providers) != 1 { t.Fatalf("Invalid number of providers: expected 1, got %d", len(discoveryManager.providers)) } - require.Equal(t, 2, len(syncedTargets)) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus"])) + require.Len(t, syncedTargets["prometheus"], 1) verifySyncedPresence(t, syncedTargets, "prometheus2", "{__address__=\"foo:9090\"}", true) - require.Equal(t, 1, len(syncedTargets["prometheus2"])) + require.Len(t, syncedTargets["prometheus2"], 1) } func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) { diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index bb84b1571..f80c53b61 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -44,12 +44,12 @@ host: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 3, len(tg.Targets)) + require.Len(t, tg.Targets, 3) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go index 1a5332137..2bc383374 100644 --- a/discovery/moby/nodes_test.go +++ b/discovery/moby/nodes_test.go @@ -45,12 +45,12 @@ host: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 5, len(tg.Targets)) + require.Len(t, tg.Targets, 5) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go index 1bc9832c7..81c8d31f1 100644 --- a/discovery/moby/services_test.go +++ b/discovery/moby/services_test.go @@ -45,12 +45,12 @@ host: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 15, len(tg.Targets)) + require.Len(t, tg.Targets, 15) for i, lbls := range []model.LabelSet{ { @@ -339,12 +339,12 @@ filters: tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg, "tg should not be nil") require.NotNil(t, tg.Targets, "tg.targets should not be nil") - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go index 2cc9322f6..eed5f2924 100644 --- a/discovery/moby/tasks_test.go +++ b/discovery/moby/tasks_test.go @@ -45,12 +45,12 @@ host: %s tgs, err := d.refresh(ctx) 
require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 27, len(tg.Targets)) + require.Len(t, tg.Targets, 27) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go index d9aa54330..f9490f476 100644 --- a/discovery/nomad/nomad_test.go +++ b/discovery/nomad/nomad_test.go @@ -147,12 +147,12 @@ func TestNomadSDRefresh(t *testing.T) { tgs, err := d.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 1, len(tg.Targets)) + require.Len(t, tg.Targets, 1) lbls := model.LabelSet{ "__address__": model.LabelValue("127.0.0.1:30456"), diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go index 396d5283d..45684b4a2 100644 --- a/discovery/openstack/hypervisor_test.go +++ b/discovery/openstack/hypervisor_test.go @@ -53,12 +53,12 @@ func TestOpenstackSDHypervisorRefresh(t *testing.T) { hypervisor, _ := mock.openstackAuthSuccess() ctx := context.Background() tgs, err := hypervisor.refresh(ctx) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NoError(t, err) require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for l, v := range map[string]string{ "__address__": "172.16.70.14:0", diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index d2da5d968..9e124b605 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -61,12 +61,12 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { tgs, err := instance.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 4, len(tg.Targets)) + require.Len(t, tg.Targets, 4) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go index e8ffa4a28..52311bcc8 100644 --- a/discovery/ovhcloud/dedicated_server_test.go +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -47,11 +47,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr targetGroups, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup) require.NotNil(t, targetGroup.Targets) - require.Equal(t, 1, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 1) for i, lbls := range []model.LabelSet{ { diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go index b1177f215..2d2d6dcd2 100644 --- a/discovery/ovhcloud/vps_test.go +++ b/discovery/ovhcloud/vps_test.go @@ -49,11 +49,11 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr targetGroups, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(targetGroups)) + require.Len(t, targetGroups, 1) targetGroup := targetGroups[0] require.NotNil(t, targetGroup) require.NotNil(t, targetGroup.Targets) - require.Equal(t, 1, len(targetGroup.Targets)) + require.Len(t, targetGroup.Targets, 1) for i, lbls := range []model.LabelSet{ { "__address__": "192.0.2.1", diff --git a/discovery/puppetdb/puppetdb_test.go 
b/discovery/puppetdb/puppetdb_test.go index 5514787d4..236efec16 100644 --- a/discovery/puppetdb/puppetdb_test.go +++ b/discovery/puppetdb/puppetdb_test.go @@ -105,7 +105,7 @@ func TestPuppetDBRefresh(t *testing.T) { Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) } func TestPuppetDBRefreshWithParameters(t *testing.T) { @@ -156,7 +156,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) { Source: ts.URL + "/pdb/query/v4?query=vhosts", }, } - require.Equal(t, tgs, expectedTargets) + require.Equal(t, expectedTargets, tgs) } func TestPuppetDBInvalidCode(t *testing.T) { diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index e7a32dd92..d2449d00c 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -55,12 +55,12 @@ api_url: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 2, len(tg.Targets)) + require.Len(t, tg.Targets, 2) for i, lbls := range []model.LabelSet{ { @@ -161,5 +161,5 @@ api_url: %s tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) } diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go index ca3896532..0ed9daa68 100644 --- a/discovery/triton/triton_test.go +++ b/discovery/triton/triton_test.go @@ -155,7 +155,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) { tgts := testTritonSDRefresh(t, conf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func TestTritonSDRefreshNoServer(t *testing.T) { @@ -163,7 +163,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) { _, err := td.refresh(context.Background()) require.Error(t, err) - require.Equal(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"), true) + require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint")) } func TestTritonSDRefreshCancelled(t *testing.T) { @@ -173,7 +173,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) { cancel() _, err := td.refresh(ctx) require.Error(t, err) - require.Equal(t, strings.Contains(err.Error(), context.Canceled.Error()), true) + require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) } func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { @@ -188,7 +188,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func TestTritonSDRefreshCNsWithHostname(t *testing.T) { @@ -205,7 +205,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) { tgts := testTritonSDRefresh(t, cnconf, dstr) require.NotNil(t, tgts) - require.Equal(t, 2, len(tgts)) + require.Len(t, tgts, 2) } func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet { @@ -235,7 +235,7 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet tgs, err := td.refresh(context.Background()) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) diff --git a/discovery/uyuni/uyuni_test.go b/discovery/uyuni/uyuni_test.go index d045cde6d..9c910a3a3 100644 --- a/discovery/uyuni/uyuni_test.go +++ b/discovery/uyuni/uyuni_test.go @@ -55,7 +55,7 @@ 
func TestUyuniSDHandleError(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestUyuniSDLogin(t *testing.T) { @@ -87,7 +87,7 @@ func TestUyuniSDLogin(t *testing.T) { tgs, err := testUpdateServices(respHandler) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } func TestUyuniSDSkipLogin(t *testing.T) { @@ -119,5 +119,5 @@ func TestUyuniSDSkipLogin(t *testing.T) { tgs, err := md.refresh(context.Background()) require.EqualError(t, err, errTesting) - require.Equal(t, len(tgs), 0) + require.Empty(t, tgs) } diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go index b72954153..0977238e0 100644 --- a/discovery/vultr/vultr_test.go +++ b/discovery/vultr/vultr_test.go @@ -56,12 +56,12 @@ func TestVultrSDRefresh(t *testing.T) { tgs, err := d.refresh(ctx) require.NoError(t, err) - require.Equal(t, 1, len(tgs)) + require.Len(t, tgs, 1) tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Equal(t, 3, len(tg.Targets)) + require.Len(t, tg.Targets, 3) for i, k := range []model.LabelSet{ { diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go index ff5217359..b699995fb 100644 --- a/discovery/xds/client_test.go +++ b/discovery/xds/client_test.go @@ -53,14 +53,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) { require.Empty(t, endpointURL) require.Error(t, err) - require.Equal(t, err.Error(), "invalid xDS server URL") + require.Equal(t, "invalid xDS server URL", err.Error()) } func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring") require.Empty(t, endpointURL) - require.NotNil(t, err) + require.Error(t, err) require.Contains(t, err.Error(), "must be either 'http' or 'https'") } @@ -68,7 +68,7 @@ func TestMakeXDSResourceHttpEndpoint(t *testing.T) { endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("http://127.0.0.1:5000"), "monitoring") require.NoError(t, err) - require.Equal(t, endpointURL.String(), "http://127.0.0.1:5000/v3/discovery:monitoring") + require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring", endpointURL.String()) } func TestCreateNewHTTPResourceClient(t *testing.T) { @@ -89,8 +89,8 @@ func TestCreateNewHTTPResourceClient(t *testing.T) { require.NoError(t, err) - require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") - require.Equal(t, client.client.Timeout, 1*time.Minute) + require.Equal(t, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1", client.endpoint) + require.Equal(t, 1*time.Minute, client.client.Timeout) } func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { @@ -138,7 +138,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) { require.NotNil(t, res) require.Equal(t, client.ResourceTypeURL(), res.TypeUrl) - require.Len(t, res.Resources, 0) + require.Empty(t, res.Resources) require.Equal(t, "abc", client.latestNonce, "Nonce not cached") require.Equal(t, "1", client.latestVersion, "Version not cached") diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go index 1db0a0831..581be9fb1 100644 --- a/discovery/xds/kuma_test.go +++ b/discovery/xds/kuma_test.go @@ -129,7 +129,7 @@ func 
TestKumaMadsV1ResourceParserInvalidTypeURL(t *testing.T) { func TestKumaMadsV1ResourceParserEmptySlice(t *testing.T) { resources := make([]*anypb.Any, 0) groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL) - require.Len(t, groups, 0) + require.Empty(t, groups) require.NoError(t, err) } diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 8a4b2e80f..dda88fccd 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -640,6 +640,7 @@ over time and return an instant vector with per-series aggregation results: * `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. * `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. * `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. +* `mad_over_time(range-vector)`: the median absolute deviation of all points in the specified interval. * `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 1014b0aad..d0fec0f13 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,26 +1,26 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.18 +go 1.20 require ( - github.com/alecthomas/kingpin/v2 v2.3.2 + github.com/alecthomas/kingpin/v2 v2.4.0 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.2 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/common v0.45.0 - github.com/prometheus/prometheus v0.47.2 + github.com/prometheus/prometheus v0.48.0 github.com/stretchr/testify v1.8.4 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.302 // indirect + github.com/aws/aws-sdk-go v1.45.25 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -29,14 +29,14 @@ require ( github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.1 // indirect 
github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect @@ -46,28 +46,28 @@ require ( github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect + github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect - go.opentelemetry.io/collector/semconv v0.81.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 // indirect + go.opentelemetry.io/collector/semconv v0.87.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 64ced7423..a30c315b8 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,23 +1,17 @@ -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -27,8 +21,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= -github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= +github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks 
v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -44,10 +38,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= @@ -82,8 +76,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -99,23 +93,23 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= +github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -125,13 +119,13 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= +github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c h1:Nc3Mt2BAnq0/VoLEntF/nipX+K1S7pG+RgwiitSv6v0= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= -github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.4.0 h1:MqlAE+w125PLvJRCpAJmEwrIxoVdUdOyuFUhE/Ukbok= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= -github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= +github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -148,8 +142,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= 
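On the `mad_over_time(range-vector)` function added to docs/querying/functions.md above: the median absolute deviation is the median of the absolute differences between each point and the window's median, a robust spread measure akin to `stddev_over_time` but far less sensitive to outliers. A small Go sketch of the statistic itself (illustrative only, not Prometheus's PromQL implementation):

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// median returns the median of vals; vals is sorted in place.
func median(vals []float64) float64 {
	sort.Float64s(vals)
	n := len(vals)
	if n%2 == 1 {
		return vals[n/2]
	}
	return (vals[n/2-1] + vals[n/2]) / 2
}

// mad computes the median absolute deviation, the statistic that
// mad_over_time evaluates over the points in a range window.
func mad(points []float64) float64 {
	vals := append([]float64(nil), points...) // copy; median sorts in place
	m := median(vals)
	devs := make([]float64, len(vals))
	for i, v := range vals {
		devs[i] = math.Abs(v - m)
	}
	return median(devs)
}

func main() {
	// Median of {1,2,4,4,9} is 4; the absolute deviations {3,2,0,0,5}
	// have median 2.
	fmt.Println(mad([]float64{1, 2, 4, 4, 9})) // 2
}
```

In PromQL it is called like any other `*_over_time` aggregation, e.g. `mad_over_time(some_metric[5m])` (metric name illustrative).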
+github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -162,14 +156,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= +github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -186,7 +180,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -205,8 +199,8 @@ github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+L github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod 
h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -221,10 +215,10 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.47.2 h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI= -github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M= +github.com/prometheus/prometheus v0.48.0 h1:yrBloImGQ7je4h8M10ujGh4R6oxYQJQKlMuETwNskGk= +github.com/prometheus/prometheus v0.48.0/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 h1:yWfiTPwYxB0l5fGMhl/G+liULugVIHD9AU77iNLrURQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -244,18 +238,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= -go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= -go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod 
h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= +go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= +go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= @@ -268,12 +262,12 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -294,8 +288,8 @@ golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -303,7 +297,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -344,7 +338,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -353,12 +347,12 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw= -google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -386,12 +380,12 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= -k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= -k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/go.mod b/go.mod index 6cc2d0237..f785de723 100644 --- a/go.mod +++ b/go.mod @@ -3,47 +3,46 @@ module github.com/prometheus/prometheus go 1.20 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 - github.com/alecthomas/kingpin/v2 v2.3.2 - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.45.25 + github.com/alecthomas/kingpin/v2 v2.4.0 + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 + github.com/aws/aws-sdk-go v1.48.14 github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.104.1 - github.com/docker/docker v24.0.6+incompatible + github.com/digitalocean/godo v1.106.0 + github.com/docker/docker v24.0.7+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.11.1 github.com/envoyproxy/protoc-gen-validate v1.0.2 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.9 github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof 
v0.0.0-20230926050212-f7f687d19a98 - github.com/google/uuid v1.3.1 - github.com/gophercloud/gophercloud v1.7.0 + github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 + github.com/google/uuid v1.4.0 + github.com/gophercloud/gophercloud v1.8.0 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.25.1 - github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c + github.com/hashicorp/consul/api v1.26.1 + github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c // Not upgrading this for now due to https://github.com/prometheus/prometheus/pull/13255#issuecomment-1845237409 github.com/hetznercloud/hcloud-go/v2 v2.4.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.9 + github.com/ionos-cloud/sdk-go/v6 v6.1.10 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.2 + github.com/klauspost/compress v1.17.4 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.24.0 - github.com/miekg/dns v1.1.56 + github.com/linode/linodego v1.25.0 + github.com/miekg/dns v1.1.57 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.4.3 - github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.26.0 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 @@ -55,61 +54,46 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/featuregate v0.77.0 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 - go.opentelemetry.io/collector/semconv v0.88.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 - go.opentelemetry.io/otel v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 - go.opentelemetry.io/otel/sdk v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/collector/featuregate v1.0.0 + go.opentelemetry.io/collector/pdata v1.0.0 + go.opentelemetry.io/collector/semconv v0.90.1 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 + go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 + go.opentelemetry.io/otel/sdk v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 - go.uber.org/goleak v1.2.1 + go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.18.0 - golang.org/x/oauth2 v0.13.0 + golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb + golang.org/x/net v0.19.0 + golang.org/x/oauth2 v0.15.0 golang.org/x/sync v0.5.0 - golang.org/x/sys v0.14.0 - golang.org/x/time v0.3.0 - golang.org/x/tools v0.15.0 - google.golang.org/api v0.147.0 - google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a + golang.org/x/sys v0.15.0 + golang.org/x/time v0.5.0 + golang.org/x/tools v0.16.0 + google.golang.org/api v0.153.0 + google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 
google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 + k8s.io/api v0.28.4 + k8s.io/apimachinery v0.28.4 + k8s.io/client-go v0.28.4 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.100.1 + k8s.io/klog/v2 v2.110.1 ) require ( - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect -) - -require ( github.com/Code-Hex/go-generics-cache v1.3.1 github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect @@ -117,6 +101,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -124,10 +109,10 @@ require ( github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.4 // indirect @@ -138,22 +123,28 @@ require ( github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.1 // indirect github.com/go-resty/resty/v2 v2.10.0 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect 
github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -161,6 +152,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect @@ -173,18 +165,24 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.mongodb.org/mongo-driver v1.12.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/crypto v0.15.0 // indirect + golang.org/x/crypto v0.16.0 // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/term v0.14.0 // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index 01804516b..d29694fb9 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= 
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -34,12 +34,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= @@ -66,15 +66,15 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= 
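Context for the go.sum churn above and below: every module version carries two entries, an `h1:` hash over the module's full extracted file tree and a second `h1:` hash over just its go.mod, which is why each version bump touches lines in pairs. The `h1:` scheme is the Hash1 algorithm from golang.org/x/mod's dirhash package; a hedged sketch of recomputing a module zip's hash (the zip path is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashZip hashes every file inside a module zip and combines the
	// per-file digests into the "h1:..." string that go.sum records.
	// The path is illustrative; real zips live under
	// $GOMODCACHE/cache/download/<module>/@v/<version>.zip.
	h, err := dirhash.HashZip("uuid@v1.4.0.zip", dirhash.DefaultHash)
	if err != nil {
		log.Fatal(err)
	}
	// If the zip matches what was published, this should print the
	// same h1: value as the go.sum entry for that module version.
	fmt.Println(h)
}
```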
+github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -91,8 +91,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= -github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.48.14 h1:nVLrp+F84SG+xGiFMfe1TE6ZV6smF+42tuuNgYGV30s= +github.com/aws/aws-sdk-go v1.48.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -135,13 +135,13 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= -github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= +github.com/digitalocean/godo v1.106.0 h1:m5iErwl3xHovGFlawd50n54ntgXHt1BLsvU6BXsVxEU= +github.com/digitalocean/godo v1.106.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= -github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -171,13 +171,13 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= 
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -197,8 +197,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -228,8 +228,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs= +github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -348,24 +348,24 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 h1:PxlBVtIFHR/mtWk2i0gTEdCz+jBnqiuHNSki0epDbVs= +github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= -github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -383,10 +383,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api 
v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM= +github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -422,6 +422,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -447,8 +448,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= -github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.10 h1:3815Q2Hw/wc4cJ8wD7bwfsmDsdfIEp80B7BQMj0YP2w= +github.com/ionos-cloud/sdk-go/v6 v6.1.10/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -481,8 +482,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -500,8 +501,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.24.0 h1:zO+bMdTE6wPccqP7QIkbxAfACX7DjSX6DW9JE/qOKDQ= -github.com/linode/linodego v1.24.0/go.mod h1:cq/ty5BCEQnsO6OjMqD7Q03KCCyB8CNM5E3MNg0LV6M= +github.com/linode/linodego v1.25.0 h1:zYMz0lTasD503jBu3tSRhzEmXHQN1zptCw5o71ibyyU= +github.com/linode/linodego v1.25.0/go.mod h1:BMZI0pMM/YGjBis7pIXDPbcgYfCZLH0/UvzqtsGtG1c= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -534,8 +535,8 @@ github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -751,8 +752,8 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= -go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -762,28 +763,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v0.77.0 h1:m1/IzaXoQh6SgF6CM80vrBOCf5zSJ2GVISfA27fYzGU= 
-go.opentelemetry.io/collector/featuregate v0.77.0/go.mod h1:/kVAsGUCyJXIDSgHftCN63QiwAEVHRLX2Kh/S+dqgHY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= -go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= -go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/collector/featuregate v1.0.0 h1:5MGqe2v5zxaoo73BUOvUTunftX5J8RGrbFsC2Ha7N3g= +go.opentelemetry.io/collector/featuregate v1.0.0/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= +go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec= +go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k= +go.opentelemetry.io/collector/semconv v0.90.1 h1:2fkQZbefQBbIcNb9Rk1mRcWlFZgQOk7CpST1e1BK8eg= +go.opentelemetry.io/collector/semconv v0.90.1/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -792,8 +793,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -816,8 +817,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -828,8 +829,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= 
-golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -896,20 +897,19 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -989,22 +989,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1014,7 +1011,6 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -1024,8 +1020,9 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1080,8 +1077,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= -golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1101,8 +1098,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= -google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= +google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= +google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1141,12 +1138,12 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI= -google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod 
h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1226,12 +1223,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= +k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= +k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index cf20b5080..64c7b797a 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -231,11 +231,8 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // resulting histogram might have buckets with a population of zero or directly // adjacent spans (offset=0). To normalize those, call the Compact method. // -// The method reconciles differences in the zero threshold and in the schema, -// but the schema of the other histogram must be ≥ the schema of the receiving -// histogram (i.e. must have an equal or higher resolution). This means that the -// schema of the receiving histogram won't change. Its zero threshold, however, -// will change if needed. The other histogram will not be modified in any case. +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. // // This method returns a pointer to the receiving histogram for convenience. 
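Under the relaxed contract above, Add no longer panics when the other histogram has a coarser schema; the receiver is instead reduced to the coarser resolution, as the switch in the hunk below implements. A minimal sketch of a call that used to panic and is now legal; the literal values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Receiver at schema 1 (higher resolution).
	a := &histogram.FloatHistogram{
		Schema:          1,
		Count:           3,
		Sum:             5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	// Other histogram at schema 0 (coarser resolution).
	b := &histogram.FloatHistogram{
		Schema:          0,
		Count:           4,
		Sum:             7,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []float64{4},
	}
	// Previously this panicked ("cannot add histogram with schema 0 to 1").
	// Now a is first reduced to schema 0, then the buckets are merged.
	sum := a.Add(b)
	fmt.Println(sum.Schema, sum.Count, sum.Sum) // 0 7 12
}
```

Reducing to the coarser schema is exact for bucket populations, which also makes Add effectively commutative up to Compact; the reworked tests later in this diff exercise it in both argument orders.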
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { @@ -269,21 +266,30 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Sum += other.Sum var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) - if other.Schema < h.Schema { - panic(fmt.Errorf("cannot add histogram with schema %d to %d", other.Schema, h.Schema)) - } else if other.Schema > h.Schema { + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -296,21 +302,29 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Sum -= other.Sum var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherPositiveSpans = other.PositiveSpans otherPositiveBuckets = other.PositiveBuckets otherNegativeSpans = other.NegativeSpans otherNegativeBuckets = other.NegativeBuckets ) - if other.Schema < h.Schema { - panic(fmt.Errorf("cannot subtract histogram with schema %d to %d", other.Schema, h.Schema)) - } else if other.Schema > h.Schema { + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + case other.Schema > h.Schema: otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets 
= addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) return h } @@ -765,8 +779,9 @@ func (h *FloatHistogram) floatBucketIterator( schema: h.Schema, positive: positive, }, - targetSchema: targetSchema, - absoluteStartValue: absoluteStartValue, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + boundReachedStartValue: absoluteStartValue == 0, } if positive { i.spans = h.PositiveSpans @@ -824,55 +839,83 @@ func (i *floatBucketIterator) Next() bool { return false } - // Copy all of these into local variables so that we can forward to the - // next bucket and then roll back if needed. - origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan - span := i.spans[spansIdx] - firstPass := true - i.currCount = 0 - -mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. - for { + if i.schema == i.targetSchema { + // Fast path for the common case. + span := i.spans[i.spansIdx] if i.bucketsIdx == 0 { // Seed origIdx for the first bucket. - origIdx = span.Offset + i.currIdx = span.Offset } else { - origIdx++ + i.currIdx++ } - for idxInSpan >= span.Length { + + for i.idxInSpan >= span.Length { // We have exhausted the current span and have to find a new // one. We even handle pathologic spans of length 0 here. - idxInSpan = 0 - spansIdx++ - if spansIdx >= len(i.spans) { - if firstPass { - return false + i.idxInSpan = 0 + i.spansIdx++ + if i.spansIdx >= len(i.spans) { + return false + } + span = i.spans[i.spansIdx] + i.currIdx += span.Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.idxInSpan++ + i.bucketsIdx++ + } else { + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. + origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + + mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. break mergeLoop } - span = i.spans[spansIdx] - origIdx += span.Offset - } - currIdx := i.targetIdx(origIdx) - switch { - case firstPass: - i.currIdx = currIdx - firstPass = false - case currIdx != i.currIdx: - // Reached next bucket in targetSchema. 
- // Do not actually forward to the next bucket, but break out. - break mergeLoop - } - i.currCount += i.buckets[i.bucketsIdx] - idxInSpan++ - i.bucketsIdx++ - i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan - if i.schema == i.targetSchema { - // Don't need to test the next bucket for mergeability - // if we have no schema change anyway. - break mergeLoop } } + // Skip buckets before absoluteStartValue. // TODO(beorn7): Maybe do something more efficient than this recursive call. if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { @@ -882,17 +925,6 @@ mergeLoop: // Merge together all buckets from the original schema that fall into return true } -// targetIdx returns the bucket index within i.targetSchema for the given bucket -// index within i.schema. -func (i *floatBucketIterator) targetIdx(idx int32) int32 { - if i.schema == i.targetSchema { - // Fast path for the common case. The below would yield the same - // result, just with more effort. - return idx - } - return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 -} - type reverseFloatBucketIterator struct { baseBucketIterator[float64, float64] idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index b93a6d90d..3d20960f6 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -1242,7 +1242,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 1.234, PositiveSpans: []Span{{0, 2}, {3, 3}}, PositiveBuckets: []float64{5, 4, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeSpans: []Span{{-6, 2}, {1, 2}}, NegativeBuckets: []float64{1, 1, 4, 4}, }, &FloatHistogram{ @@ -1262,7 +1262,7 @@ func TestFloatHistogramAdd(t *testing.T) { Sum: 3.579, PositiveSpans: []Span{{-2, 2}, {0, 5}, {0, 3}}, PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, - NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + NegativeSpans: []Span{{-6, 2}, {1, 2}, {4, 2}, {3, 2}}, NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, }, }, @@ -1573,16 +1573,33 @@ func TestFloatHistogramAdd(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - in2Copy := c.in2.Copy() - require.Equal(t, c.expected, c.in1.Add(c.in2)) - // Has it also happened in-place? - require.Equal(t, c.expected, c.in1) - // Check that the argument was not mutated. - require.Equal(t, in2Copy, c.in2) + testHistogramAdd(t, c.in1, c.in2, c.expected) + testHistogramAdd(t, c.in2, c.in1, c.expected) }) } } +func testHistogramAdd(t *testing.T, a, b, expected *FloatHistogram) { + var ( + aCopy = a.Copy() + bCopy = b.Copy() + expectedCopy = expected.Copy() + ) + + res := aCopy.Add(bCopy) + + res.Compact(0) + expectedCopy.Compact(0) + + require.Equal(t, expectedCopy, res) + + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) +} + func TestFloatHistogramSub(t *testing.T) { // This has fewer test cases than TestFloatHistogramAdd because Add and // Sub share most of the trickier code. @@ -1662,16 +1679,35 @@ func TestFloatHistogramSub(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - in2Copy := c.in2.Copy() - require.Equal(t, c.expected, c.in1.Sub(c.in2)) - // Has it also happened in-place? - require.Equal(t, c.expected, c.in1) - // Check that the argument was not mutated. 
- require.Equal(t, in2Copy, c.in2) + testFloatHistogramSub(t, c.in1, c.in2, c.expected) + + expectedNegative := c.expected.Copy().Mul(-1) + testFloatHistogramSub(t, c.in2, c.in1, expectedNegative) }) } } +func testFloatHistogramSub(t *testing.T, a, b, expected *FloatHistogram) { + var ( + aCopy = a.Copy() + bCopy = b.Copy() + expectedCopy = expected.Copy() + ) + + res := aCopy.Sub(bCopy) + + res.Compact(0) + expectedCopy.Compact(0) + + require.Equal(t, expectedCopy, res) + + // Has it also happened in-place? + require.Equal(t, expectedCopy, aCopy) + + // Check that the argument was not mutated. + require.Equal(t, b, bCopy) +} + func TestFloatHistogramCopyToSchema(t *testing.T) { cases := []struct { name string @@ -1782,8 +1818,8 @@ func TestReverseFloatBucketIterator(t *testing.T) { for it.Next() { actBuckets = append(actBuckets, it.At()) } - require.Greater(t, len(expBuckets), 0) - require.Greater(t, len(actBuckets), 0) + require.NotEmpty(t, expBuckets) + require.NotEmpty(t, actBuckets) require.Equal(t, expBuckets, actBuckets) // Negative buckets. @@ -1798,8 +1834,8 @@ func TestReverseFloatBucketIterator(t *testing.T) { for it.Next() { actBuckets = append(actBuckets, it.At()) } - require.Greater(t, len(expBuckets), 0) - require.Greater(t, len(actBuckets), 0) + require.NotEmpty(t, expBuckets) + require.NotEmpty(t, actBuckets) require.Equal(t, expBuckets, actBuckets) } diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go index fb0185a63..f4d292b34 100644 --- a/model/histogram/histogram.go +++ b/model/histogram/histogram.go @@ -282,50 +282,49 @@ func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { return h } -// ToFloat returns a FloatHistogram representation of the Histogram. It is a -// deep copy (e.g. spans are not shared). -func (h *Histogram) ToFloat() *FloatHistogram { - var ( - positiveSpans, negativeSpans []Span - positiveBuckets, negativeBuckets []float64 - ) - if len(h.PositiveSpans) != 0 { - positiveSpans = make([]Span, len(h.PositiveSpans)) - copy(positiveSpans, h.PositiveSpans) +// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep +// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an +// argument whose memory will be reused and overwritten if provided. If this +// argument is nil, a new FloatHistogram will be allocated. 
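The reuse-oriented signature just described lets hot paths amortize allocations by threading one FloatHistogram through repeated conversions. A sketch of the intended call pattern; `sumCounts` and the sample data are illustrative, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

// sumCounts converts each integer histogram through a single reused
// FloatHistogram, so later iterations can reuse its buffers.
func sumCounts(hs []*histogram.Histogram) float64 {
	var (
		fh    *histogram.FloatHistogram
		total float64
	)
	for _, h := range hs {
		fh = h.ToFloat(fh) // nil on the first pass allocates; later passes reuse fh
		total += fh.Count  // fh must not be retained: the next call overwrites its memory
	}
	return total
}

func main() {
	hs := []*histogram.Histogram{{Count: 2, Sum: 3}, {Count: 5, Sum: 8}}
	fmt.Println(sumCounts(hs)) // 7
}
```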
+func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { + if fh == nil { + fh = &FloatHistogram{} } - if len(h.NegativeSpans) != 0 { - negativeSpans = make([]Span, len(h.NegativeSpans)) - copy(negativeSpans, h.NegativeSpans) - } - if len(h.PositiveBuckets) != 0 { - positiveBuckets = make([]float64, len(h.PositiveBuckets)) - var current float64 - for i, b := range h.PositiveBuckets { - current += float64(b) - positiveBuckets[i] = current - } - } - if len(h.NegativeBuckets) != 0 { - negativeBuckets = make([]float64, len(h.NegativeBuckets)) - var current float64 - for i, b := range h.NegativeBuckets { - current += float64(b) - negativeBuckets[i] = current - } + fh.CounterResetHint = h.CounterResetHint + fh.Schema = h.Schema + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + fh.Count = float64(h.Count) + fh.Sum = h.Sum + + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) + copy(fh.PositiveSpans, h.PositiveSpans) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) + var currentPositive float64 + for i, b := range h.PositiveBuckets { + currentPositive += float64(b) + fh.PositiveBuckets[i] = currentPositive } - return &FloatHistogram{ - CounterResetHint: h.CounterResetHint, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: float64(h.ZeroCount), - Count: float64(h.Count), - Sum: h.Sum, - PositiveSpans: positiveSpans, - NegativeSpans: negativeSpans, - PositiveBuckets: positiveBuckets, - NegativeBuckets: negativeBuckets, + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative } + + return fh +} + +func resize[T any](items []T, n int) []T { + if cap(items) < n { + return make([]T, n) + } + return items[:n] } // Validate validates consistency between span and bucket slices. 
Also, buckets are checked diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index d5aed112a..9a64faaaa 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -408,9 +408,57 @@ func TestHistogramToFloat(t *testing.T) { }, NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, } - fh := h.ToFloat() + cases := []struct { + name string + fh *FloatHistogram + }{ + {name: "without prior float histogram"}, + {name: "prior float histogram with more buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}, + }}, + {name: "prior float histogram with fewer buckets", fh: &FloatHistogram{ + Schema: 2, + Count: 3, + Sum: 5, + ZeroThreshold: 4, + ZeroCount: 1, + PositiveSpans: []Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2}, + NegativeSpans: []Span{ + {Offset: 20, Length: 6}, + {Offset: 12, Length: 7}, + {Offset: 33, Length: 10}, + }, + NegativeBuckets: []float64{1, 2}, + }}, + } - require.Equal(t, h.String(), fh.String()) + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fh := h.ToFloat(c.fh) + require.Equal(t, h.String(), fh.String()) + }) + } } // TestHistogramEquals tests both Histogram and FloatHistogram. @@ -436,14 +484,14 @@ func TestHistogramEquals(t *testing.T) { equals := func(h1, h2 Histogram) { require.True(t, h1.Equals(&h2)) require.True(t, h2.Equals(&h1)) - h1f, h2f := h1.ToFloat(), h2.ToFloat() + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) require.True(t, h1f.Equals(h2f)) require.True(t, h2f.Equals(h1f)) } notEquals := func(h1, h2 Histogram) { require.False(t, h1.Equals(&h2)) require.False(t, h2.Equals(&h1)) - h1f, h2f := h1.ToFloat(), h2.ToFloat() + h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil) require.False(t, h1f.Equals(h2f)) require.False(t, h2f.Equals(h1f)) } @@ -950,7 +998,7 @@ func TestHistogramValidation(t *testing.T) { return } - fh := tc.h.ToFloat() + fh := tc.h.ToFloat(nil) if err := fh.Validate(); tc.errMsg != "" { require.EqualError(t, err, tc.errMsg) } else { diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index d6499538e..ef5008f4b 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -193,10 +193,10 @@ groups: _, errs := Parse([]byte(group)) require.Len(t, errs, 2, "Expected two errors") var err00 *Error - require.True(t, errors.As(errs[0], &err00)) + require.ErrorAs(t, errs[0], &err00) err0 := err00.Err.node var err01 *Error - require.True(t, errors.As(errs[1], &err01)) + require.ErrorAs(t, errs[1], &err01) err1 := err01.Err.node require.NotEqual(t, err0, err1, "Error nodes should not be the same") } diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 2f5fdbc3b..df4259c85 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -16,8 +16,6 @@ package textparse import ( "mime" - "github.com/gogo/protobuf/types" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -66,10 +64,10 @@ type Parser interface { // retrieved (including the case where no 
exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool - // CreatedTimestamp writes the created timestamp of the current sample - // into the passed timestamp. It returns false if no created timestamp - // exists or if the metric type does not support created timestamps. - CreatedTimestamp(ct *types.Timestamp) bool + // CreatedTimestamp returns the created timestamp (in milliseconds) for the + // current sample. It returns nil if it is unknown, e.g. if it wasn't set or + // if the scrape protocol or metric type does not support created timestamps. + CreatedTimestamp() *int64 // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index bb5075544..f0c383723 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -24,8 +24,6 @@ import ( "strings" "unicode/utf8" - "github.com/gogo/protobuf/types" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -213,9 +211,10 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// CreatedTimestamp returns false because OpenMetricsParser does not support created timestamps (yet). -func (p *OpenMetricsParser) CreatedTimestamp(_ *types.Timestamp) bool { - return false +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *OpenMetricsParser) CreatedTimestamp() *int64 { + return nil } // nextToken returns the next token from the openMetricsLexer. diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index d65e4977e..eed30364c 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -269,9 +269,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` require.Equal(t, exp[i].v, v) require.Equal(t, exp[i].lset, res) if exp[i].e == nil { - require.Equal(t, false, found) + require.False(t, found) } else { - require.Equal(t, true, found) + require.True(t, found) require.Equal(t, *exp[i].e, e) } @@ -296,7 +296,7 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) } func TestOpenMetricsParseErrors(t *testing.T) { diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index b3fa2d8a6..935801fb9 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -26,8 +26,6 @@ import ( "unicode/utf8" "unsafe" - "github.com/gogo/protobuf/types" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -247,9 +245,10 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } -// CreatedTimestamp returns false because PromParser does not support created timestamps. -func (p *PromParser) CreatedTimestamp(_ *types.Timestamp) bool { - return false +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *PromParser) CreatedTimestamp() *int64 { + return nil } // nextToken returns the next token from the promlexer. 
It skips over tabs diff --git a/model/textparse/promparse_test.go index 280f39b4f..ac79a1394 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -209,7 +209,7 @@ testmetric{label="\"bar\""} 1` i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) } func TestPromParseErrors(t *testing.T) { diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index 23afb5c59..baede7e1d 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -360,22 +360,26 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return true } -func (p *ProtobufParser) CreatedTimestamp(ct *types.Timestamp) bool { - var foundCT *types.Timestamp +// CreatedTimestamp returns the created timestamp, or nil if it is not present or +// invalid as a timestamp (e.g. a negative value) on counters, summaries, or histograms. +func (p *ProtobufParser) CreatedTimestamp() *int64 { + var ct *types.Timestamp switch p.mf.GetType() { case dto.MetricType_COUNTER: - foundCT = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() case dto.MetricType_SUMMARY: - foundCT = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - foundCT = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() default: } - if foundCT == nil { - return false + ctAsTime, err := types.TimestampFromProto(ct) + if err != nil { + // An error here means ct is nil or an invalid timestamp, which we silently ignore. 
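For callers, the new `*int64` contract across all three parsers reduces to a nil check; a hypothetical consumer of any `textparse.Parser` (`logCreatedTimestamp` is illustrative):

```go
package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/textparse"
)

// logCreatedTimestamp demonstrates the nil-means-unknown convention of the
// new Parser.CreatedTimestamp contract: nil is simply skipped, while a
// non-nil value is the created timestamp in milliseconds since the epoch.
func logCreatedTimestamp(p textparse.Parser) {
	if ct := p.CreatedTimestamp(); ct != nil {
		fmt.Printf("created timestamp: %d ms\n", *ct)
	}
}
```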
+ return nil } - *ct = *foundCT - return true + ctMilis := ctAsTime.UnixMilli() + return &ctMilis } // Next advances the parser to the next "sample" (emulating the behavior of a diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go index d83f2088a..c5b672dbc 100644 --- a/model/textparse/protobufparse_test.go +++ b/model/textparse/protobufparse_test.go @@ -21,7 +21,6 @@ import ( "testing" "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" @@ -630,7 +629,7 @@ func TestProtobufParse(t *testing.T) { shs *histogram.Histogram fhs *histogram.FloatHistogram e []exemplar.Exemplar - ct *types.Timestamp + ct int64 } inputBuf := createTestProtoBuf(t) @@ -1069,7 +1068,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_counter_with_createdtimestamp", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ -1085,7 +1084,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_count", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1093,7 +1092,7 @@ func TestProtobufParse(t *testing.T) { { m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1108,7 +1107,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_histogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1128,7 +1127,7 @@ func TestProtobufParse(t *testing.T) { }, { m: "test_gaugehistogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1887,7 +1886,7 @@ func TestProtobufParse(t *testing.T) { { // 83 m: "test_counter_with_createdtimestamp", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_counter_with_createdtimestamp", ), @@ -1903,7 +1902,7 @@ func TestProtobufParse(t *testing.T) { { // 86 m: "test_summary_with_createdtimestamp_count", v: 42, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_count", ), @@ -1911,7 +1910,7 @@ func TestProtobufParse(t *testing.T) { { // 87 m: "test_summary_with_createdtimestamp_sum", v: 1.234, - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, lset: labels.FromStrings( "__name__", "test_summary_with_createdtimestamp_sum", ), @@ -1926,7 +1925,7 @@ func TestProtobufParse(t *testing.T) { }, { // 90 m: "test_histogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, PositiveSpans: []histogram.Span{}, @@ -1946,7 +1945,7 @@ func TestProtobufParse(t *testing.T) { }, { // 93 m: "test_gaugehistogram_with_createdtimestamp", - ct: &types.Timestamp{Seconds: 1, Nanos: 1}, + ct: 1000, shs: &histogram.Histogram{ CounterResetHint: histogram.GaugeType, PositiveSpans: []histogram.Span{}, @@ -1981,30 +1980,29 @@ func TestProtobufParse(t *testing.T) { m, ts, v := p.Series() var e 
exemplar.Exemplar - var ct types.Timestamp p.Metric(&res) eFound := p.Exemplar(&e) - ctFound := p.CreatedTimestamp(&ct) + ct := p.CreatedTimestamp() require.Equal(t, exp[i].m, string(m), "i: %d", i) if ts != nil { require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0), "i: %d", i) + require.Equal(t, int64(0), exp[i].t, "i: %d", i) } require.Equal(t, exp[i].v, v, "i: %d", i) require.Equal(t, exp[i].lset, res, "i: %d", i) if len(exp[i].e) == 0 { - require.Equal(t, false, eFound, "i: %d", i) + require.False(t, eFound, "i: %d", i) } else { - require.Equal(t, true, eFound, "i: %d", i) + require.True(t, eFound, "i: %d", i) require.Equal(t, exp[i].e[0], e, "i: %d", i) require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i) } - if exp[i].ct != nil { - require.Equal(t, true, ctFound, "i: %d", i) - require.Equal(t, exp[i].ct.String(), ct.String(), "i: %d", i) + if exp[i].ct != 0 { + require.NotNilf(t, ct, "i: %d", i) + require.Equal(t, exp[i].ct, *ct, "i: %d", i) } else { - require.Equal(t, false, ctFound, "i: %d", i) + require.Nilf(t, ct, "i: %d", i) } case EntryHistogram: @@ -2014,7 +2012,7 @@ func TestProtobufParse(t *testing.T) { if ts != nil { require.Equal(t, exp[i].t, *ts, "i: %d", i) } else { - require.Equal(t, exp[i].t, int64(0), "i: %d", i) + require.Equal(t, int64(0), exp[i].t, "i: %d", i) } require.Equal(t, exp[i].lset, res, "i: %d", i) require.Equal(t, exp[i].m, string(m), "i: %d", i) @@ -2028,7 +2026,7 @@ func TestProtobufParse(t *testing.T) { require.Equal(t, exp[i].e[j], e, "i: %d", i) e = exemplar.Exemplar{} } - require.Equal(t, len(exp[i].e), j, "not enough exemplars found, i: %d", i) + require.Len(t, exp[i].e, j, "not enough exemplars found, i: %d", i) case EntryType: m, typ := p.Type() @@ -2051,7 +2049,7 @@ func TestProtobufParse(t *testing.T) { i++ } - require.Equal(t, len(exp), i) + require.Len(t, exp, i) }) } } diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 66ee45c6e..837ec66bd 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -83,7 +83,7 @@ func TestHandlerNextBatch(t *testing.T) { require.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch())) require.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch())) require.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch())) - require.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue)) + require.Empty(t, h.queue, "Expected queue to be empty but got %d alerts", len(h.queue)) } func alertsEqual(a, b []*Alert) error { @@ -482,7 +482,7 @@ alerting: ` err := yaml.UnmarshalStrict([]byte(s), cfg) require.NoError(t, err, "Unable to load YAML config.") - require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) err = n.ApplyConfig(cfg) require.NoError(t, err, "Error applying the config.") @@ -533,7 +533,7 @@ alerting: ` err := yaml.UnmarshalStrict([]byte(s), cfg) require.NoError(t, err, "Unable to load YAML config.") - require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs)) + require.Len(t, cfg.AlertingConfig.AlertmanagerConfigs, 1) err = n.ApplyConfig(cfg) require.NoError(t, err, "Error applying the config.") diff --git a/prompb/io/prometheus/client/metrics.pb.go b/prompb/io/prometheus/client/metrics.pb.go index e6623e9e1..702ee62fc 100644 --- a/prompb/io/prometheus/client/metrics.pb.go +++ b/prompb/io/prometheus/client/metrics.pb.go @@ -878,6 +878,7 @@ type MetricFamily struct { Help 
string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -944,6 +945,13 @@ func (m *MetricFamily) GetMetric() []Metric { return nil } +func (m *MetricFamily) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + func init() { proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") @@ -965,67 +973,68 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 960 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, - 0x14, 0xee, 0xd6, 0xbf, 0x7b, 0x1c, 0x27, 0x9b, 0xc1, 0xaa, 0x56, 0x81, 0xc4, 0x66, 0x25, 0xa4, - 0x80, 0x90, 0x2d, 0xa0, 0x08, 0x54, 0x8a, 0x44, 0xd2, 0xa6, 0x2e, 0x2a, 0x6e, 0xcb, 0xd8, 0xbe, - 0x28, 0x37, 0xab, 0xb1, 0x3d, 0x59, 0xaf, 0xd8, 0xdd, 0x59, 0xf6, 0xa7, 0x22, 0xdc, 0xf3, 0x0c, - 0xbc, 0x00, 0x17, 0x3c, 0x05, 0x97, 0xa8, 0x97, 0x5c, 0x71, 0x89, 0x50, 0x9e, 0x04, 0xcd, 0xdf, - 0xae, 0x53, 0xad, 0x03, 0x81, 0xbb, 0x99, 0xcf, 0xdf, 0x39, 0xf3, 0x9d, 0x6f, 0xc6, 0xe7, 0x2c, - 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x69, 0xb6, 0xa6, 0x79, 0x3a, 0x5a, 0x06, 0x3e, 0x8d, 0xb2, - 0x51, 0x48, 0xb3, 0xc4, 0x5f, 0xa6, 0xc3, 0x38, 0x61, 0x19, 0x43, 0x3d, 0x9f, 0x0d, 0x4b, 0xce, - 0x50, 0x72, 0x0e, 0x7a, 0x1e, 0xf3, 0x98, 0x20, 0x8c, 0xf8, 0x4a, 0x72, 0x0f, 0xfa, 0x1e, 0x63, - 0x5e, 0x40, 0x47, 0x62, 0xb7, 0xc8, 0xcf, 0x47, 0x99, 0x1f, 0xd2, 0x34, 0x23, 0x61, 0x2c, 0x09, - 0xce, 0xc7, 0x60, 0x7e, 0x45, 0x16, 0x34, 0x78, 0x4e, 0xfc, 0x04, 0x21, 0xa8, 0x47, 0x24, 0xa4, - 0xb6, 0x31, 0x30, 0x8e, 0x4d, 0x2c, 0xd6, 0xa8, 0x07, 0x8d, 0x97, 0x24, 0xc8, 0xa9, 0x7d, 0x5b, - 0x80, 0x72, 0xe3, 0x1c, 0x42, 0x63, 0x4c, 0x72, 0x6f, 0xe3, 0x67, 0x1e, 0x63, 0xe8, 0x9f, 0x7f, - 0x36, 0xa0, 0xf5, 0x80, 0xe5, 0x51, 0x46, 0x93, 0x6a, 0x06, 0xba, 0x07, 0x6d, 0xfa, 0x3d, 0x0d, - 0xe3, 0x80, 0x24, 0x22, 0x73, 0xe7, 0xc3, 0xa3, 0x61, 0x55, 0x5d, 0xc3, 0x33, 0xc5, 0xc2, 0x05, - 0x1f, 0x8d, 0x61, 0x7f, 0x99, 0x50, 0x92, 0xd1, 0x95, 0x5b, 0x94, 0x63, 0xd7, 0x44, 0x92, 0x83, - 0xa1, 0x2c, 0x78, 0xa8, 0x0b, 0x1e, 0xce, 0x34, 0x03, 0x5b, 0x2a, 0xa8, 0x40, 0x9c, 0xfb, 0xd0, - 0xfe, 0x3a, 0x27, 0x51, 0xe6, 0x07, 0x14, 0x1d, 0x40, 0xfb, 0x3b, 0xb5, 0x56, 0x4a, 0x8b, 0xfd, - 0x55, 0x0f, 0x8a, 0x22, 0xff, 0x30, 0xa0, 0x35, 0xcd, 0xc3, 0x90, 0x24, 0x17, 0xe8, 0x6d, 0xd8, - 0x49, 0x49, 0x18, 0x07, 0xd4, 0x5d, 0xf2, 0xb2, 0x45, 0x86, 0x3a, 0xee, 0x48, 0x4c, 0x38, 0x81, - 0x0e, 0x01, 0x14, 0x25, 0xcd, 0x43, 0x95, 0xc9, 0x94, 0xc8, 0x34, 0x0f, 0xd1, 0x17, 0x1b, 0xe7, - 0xd7, 0x06, 0xb5, 0xed, 0x86, 0x68, 0xc5, 0xa7, 0xf5, 0x57, 0x7f, 0xf6, 0x6f, 0x6d, 0xa8, 0xac, - 0xb4, 0xa5, 0xfe, 0x1f, 0x6c, 0xe9, 0x43, 0x6b, 0x1e, 0x65, 0x17, 0x31, 0x5d, 0x6d, 0xb9, 0xde, - 0x5f, 0x1b, 0x60, 0x3e, 0xf6, 0xd3, 0x8c, 0x79, 0x09, 0x09, 0xff, 0x4d, 0xed, 0xef, 0x03, 0xda, - 0xa4, 0xb8, 0xe7, 0x01, 0x23, 0x99, 0xd0, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x88, 0xe3, 0xff, 0xe4, - 0xd4, 0x3d, 0x68, 0x2e, 0xf2, 0xe5, 0xb7, 0x34, 0x53, 0x3e, 0xbd, 0x55, 0xed, 0xd3, 0xa9, 0xe0, - 0x28, 0x97, 0x54, 0x44, 0xb5, 0x47, 0x7b, 0x37, 0xf7, 0x08, 0xdd, 0x81, 
0x66, 0xba, 0x5c, 0xd3, - 0x90, 0xd8, 0x8d, 0x81, 0x71, 0xbc, 0x8f, 0xd5, 0x0e, 0xbd, 0x03, 0xbb, 0x3f, 0xd0, 0x84, 0xb9, - 0xd9, 0x3a, 0xa1, 0xe9, 0x9a, 0x05, 0x2b, 0xbb, 0x29, 0xf4, 0x77, 0x39, 0x3a, 0xd3, 0x20, 0x2f, - 0x51, 0xd0, 0xa4, 0x63, 0x2d, 0xe1, 0x98, 0xc9, 0x11, 0xe9, 0xd7, 0x31, 0x58, 0xe5, 0xcf, 0xca, - 0xad, 0xb6, 0xc8, 0xb3, 0x5b, 0x90, 0xa4, 0x57, 0x4f, 0xa0, 0x1b, 0x51, 0x8f, 0x64, 0xfe, 0x4b, - 0xea, 0xa6, 0x31, 0x89, 0x6c, 0x53, 0x78, 0x32, 0xb8, 0xce, 0x93, 0x69, 0x4c, 0x22, 0xe5, 0xcb, - 0x8e, 0x0e, 0xe6, 0x18, 0x17, 0x5f, 0x24, 0x5b, 0xd1, 0x20, 0x23, 0x36, 0x0c, 0x6a, 0xc7, 0x08, - 0x17, 0x47, 0x3c, 0xe4, 0xe0, 0x15, 0x9a, 0x2c, 0xa0, 0x33, 0xa8, 0xf1, 0x1a, 0x35, 0x2a, 0x8b, - 0x78, 0x02, 0xdd, 0x98, 0xa5, 0x7e, 0x29, 0x6d, 0xe7, 0x66, 0xd2, 0x74, 0xb0, 0x96, 0x56, 0x24, - 0x93, 0xd2, 0xba, 0x52, 0x9a, 0x46, 0x0b, 0x69, 0x05, 0x4d, 0x4a, 0xdb, 0x95, 0xd2, 0x34, 0x2a, - 0xa4, 0x39, 0xbf, 0x19, 0xd0, 0x94, 0x07, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x96, - 0x23, 0x5f, 0xf0, 0x5e, 0x89, 0xcb, 0x82, 0xee, 0xc2, 0x9d, 0xd7, 0xa9, 0x57, 0x5e, 0x72, 0xef, - 0xb5, 0x00, 0x79, 0x43, 0x7d, 0xe8, 0xe4, 0x71, 0x4c, 0x13, 0x77, 0xc1, 0xf2, 0x68, 0xa5, 0x9e, - 0x33, 0x08, 0xe8, 0x94, 0x23, 0x57, 0x5a, 0x61, 0xed, 0x66, 0xad, 0xd0, 0xb9, 0x0f, 0x50, 0x1a, - 0xc7, 0x1f, 0x25, 0x3b, 0x3f, 0x4f, 0xa9, 0xac, 0x60, 0x1f, 0xab, 0x1d, 0xc7, 0x03, 0x1a, 0x79, - 0xd9, 0x5a, 0x9c, 0xde, 0xc5, 0x6a, 0xe7, 0xfc, 0x64, 0x40, 0x5b, 0x27, 0x45, 0x9f, 0x41, 0x23, - 0xe0, 0x93, 0xc0, 0x36, 0xc4, 0x35, 0xf5, 0xab, 0x35, 0x14, 0xc3, 0x42, 0xdd, 0x92, 0x8c, 0xa9, - 0xee, 0x90, 0xe8, 0x53, 0x30, 0x6f, 0xd2, 0xa0, 0x4b, 0xb2, 0xf3, 0x63, 0x0d, 0x9a, 0x13, 0x31, - 0xf5, 0xfe, 0x9f, 0xae, 0x0f, 0xa0, 0xe1, 0xf1, 0x39, 0xa5, 0x66, 0xcc, 0x9b, 0xd5, 0xc1, 0x62, - 0x94, 0x61, 0xc9, 0x44, 0x9f, 0x40, 0x6b, 0x29, 0x47, 0x97, 0x92, 0x7c, 0x58, 0x1d, 0xa4, 0xe6, - 0x1b, 0xd6, 0x6c, 0x1e, 0x98, 0xca, 0x71, 0xa0, 0xba, 0xee, 0x96, 0x40, 0x35, 0x33, 0xb0, 0x66, - 0xf3, 0xc0, 0x5c, 0xf6, 0x5b, 0xd1, 0x4c, 0xb6, 0x06, 0xaa, 0xa6, 0x8c, 0x35, 0x1b, 0x7d, 0x0e, - 0xe6, 0x5a, 0xb7, 0x61, 0xd1, 0x44, 0xb6, 0xda, 0x53, 0x74, 0x6b, 0x5c, 0x46, 0xf0, 0xc6, 0x5d, - 0x38, 0xee, 0x86, 0xa9, 0xe8, 0x54, 0x35, 0xdc, 0x29, 0xb0, 0x49, 0xea, 0xfc, 0x62, 0xc0, 0x8e, - 0xbc, 0x87, 0x47, 0x24, 0xf4, 0x83, 0x8b, 0xca, 0x4f, 0x04, 0x04, 0xf5, 0x35, 0x0d, 0x62, 0xf5, - 0x85, 0x20, 0xd6, 0xe8, 0x2e, 0xd4, 0xb9, 0x46, 0x61, 0xe1, 0xee, 0xb6, 0xff, 0xbc, 0xcc, 0x3c, - 0xbb, 0x88, 0x29, 0x16, 0x6c, 0xde, 0xda, 0xe5, 0xb7, 0x8e, 0x5d, 0xbf, 0xae, 0xb5, 0xcb, 0x38, - 0xdd, 0xda, 0x65, 0xc4, 0x7b, 0x0b, 0x80, 0x32, 0x1f, 0xea, 0x40, 0xeb, 0xc1, 0xb3, 0xf9, 0xd3, - 0xd9, 0x19, 0xb6, 0x6e, 0x21, 0x13, 0x1a, 0xe3, 0x93, 0xf9, 0xf8, 0xcc, 0x32, 0x38, 0x3e, 0x9d, - 0x4f, 0x26, 0x27, 0xf8, 0x85, 0x75, 0x9b, 0x6f, 0xe6, 0x4f, 0x67, 0x2f, 0x9e, 0x9f, 0x3d, 0xb4, - 0x6a, 0xa8, 0x0b, 0xe6, 0xe3, 0x2f, 0xa7, 0xb3, 0x67, 0x63, 0x7c, 0x32, 0xb1, 0xea, 0xe8, 0x0d, - 0xd8, 0x13, 0x31, 0x6e, 0x09, 0x36, 0x4e, 0x9d, 0x57, 0x97, 0x47, 0xc6, 0xef, 0x97, 0x47, 0xc6, - 0x5f, 0x97, 0x47, 0xc6, 0x37, 0x3d, 0x9f, 0xb9, 0xa5, 0x38, 0x57, 0x8a, 0x5b, 0x34, 0xc5, 0xcb, - 0xfe, 0xe8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x2e, 0x66, 0xc1, 0xcb, 0x09, 0x00, 0x00, + // 969 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0xae, 0x9b, 0x5f, 0x9f, 0x6c, 0x76, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, + 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0x52, 0x24, 
0x76, 0xdb, 0x6d, 0x8a, 0x4a, 0xda, 0x32, 0x49, + 0x2e, 0xca, 0x8d, 0x35, 0x49, 0x66, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, 0x33, + 0xf0, 0x02, 0x3c, 0x06, 0xe2, 0x12, 0xf5, 0x92, 0x2b, 0x2e, 0x11, 0xda, 0x27, 0x41, 0xf3, 0x63, + 0x3b, 0x5b, 0x39, 0x0b, 0x0b, 0x77, 0x33, 0x5f, 0xbe, 0x73, 0xe6, 0x3b, 0xdf, 0x4c, 0xce, 0x31, + 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x29, 0x5f, 0xd3, 0x2c, 0x1d, 0x2d, 0x03, 0x9f, 0x46, 0x7c, + 0x14, 0x52, 0x9e, 0xf8, 0xcb, 0x74, 0x18, 0x27, 0x8c, 0x33, 0xd4, 0xf3, 0xd9, 0xb0, 0xe4, 0x0c, + 0x15, 0xe7, 0xa0, 0xe7, 0x31, 0x8f, 0x49, 0xc2, 0x48, 0xac, 0x14, 0xf7, 0xa0, 0xef, 0x31, 0xe6, + 0x05, 0x74, 0x24, 0x77, 0x8b, 0xec, 0x7c, 0xc4, 0xfd, 0x90, 0xa6, 0x9c, 0x84, 0xb1, 0x22, 0x38, + 0x1f, 0x83, 0xf9, 0x15, 0x59, 0xd0, 0xe0, 0x39, 0xf1, 0x13, 0x84, 0xa0, 0x1e, 0x91, 0x90, 0xda, + 0xc6, 0xc0, 0x38, 0x36, 0xb1, 0x5c, 0xa3, 0x1e, 0x34, 0x5e, 0x92, 0x20, 0xa3, 0xf6, 0x6d, 0x09, + 0xaa, 0x8d, 0x73, 0x08, 0x8d, 0x31, 0xc9, 0xbc, 0x8d, 0x9f, 0x45, 0x8c, 0x91, 0xff, 0xfc, 0xb3, + 0x01, 0xad, 0x07, 0x2c, 0x8b, 0x38, 0x4d, 0xaa, 0x19, 0xe8, 0x1e, 0xb4, 0xe9, 0xf7, 0x34, 0x8c, + 0x03, 0x92, 0xc8, 0xcc, 0x9d, 0x0f, 0x8f, 0x86, 0x55, 0x75, 0x0d, 0xcf, 0x34, 0x0b, 0x17, 0x7c, + 0x34, 0x86, 0xfd, 0x65, 0x42, 0x09, 0xa7, 0x2b, 0xb7, 0x28, 0xc7, 0xae, 0xc9, 0x24, 0x07, 0x43, + 0x55, 0xf0, 0x30, 0x2f, 0x78, 0x38, 0xcb, 0x19, 0xd8, 0xd2, 0x41, 0x05, 0xe2, 0xdc, 0x87, 0xf6, + 0xd7, 0x19, 0x89, 0xb8, 0x1f, 0x50, 0x74, 0x00, 0xed, 0xef, 0xf4, 0x5a, 0x2b, 0x2d, 0xf6, 0x57, + 0x3d, 0x28, 0x8a, 0xfc, 0xc3, 0x80, 0xd6, 0x34, 0x0b, 0x43, 0x92, 0x5c, 0xa0, 0xb7, 0x61, 0x27, + 0x25, 0x61, 0x1c, 0x50, 0x77, 0x29, 0xca, 0x96, 0x19, 0xea, 0xb8, 0xa3, 0x30, 0xe9, 0x04, 0x3a, + 0x04, 0xd0, 0x94, 0x34, 0x0b, 0x75, 0x26, 0x53, 0x21, 0xd3, 0x2c, 0x44, 0x5f, 0x6c, 0x9c, 0x5f, + 0x1b, 0xd4, 0xb6, 0x1b, 0x92, 0x2b, 0x3e, 0xad, 0xbf, 0xfa, 0xb3, 0x7f, 0x6b, 0x43, 0x65, 0xa5, + 0x2d, 0xf5, 0xff, 0x60, 0x4b, 0x1f, 0x5a, 0xf3, 0x88, 0x5f, 0xc4, 0x74, 0xb5, 0xe5, 0x7a, 0x7f, + 0x6d, 0x80, 0xf9, 0xd8, 0x4f, 0x39, 0xf3, 0x12, 0x12, 0xfe, 0x9b, 0xda, 0xdf, 0x07, 0xb4, 0x49, + 0x71, 0xcf, 0x03, 0x46, 0xb8, 0xd4, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x48, 0xe0, 0xff, 0xe4, 0xd4, + 0x3d, 0x68, 0x2e, 0xb2, 0xe5, 0xb7, 0x94, 0x6b, 0x9f, 0xde, 0xaa, 0xf6, 0xe9, 0x54, 0x72, 0xb4, + 0x4b, 0x3a, 0xa2, 0xda, 0xa3, 0xbd, 0x9b, 0x7b, 0x84, 0xee, 0x40, 0x33, 0x5d, 0xae, 0x69, 0x48, + 0xec, 0xc6, 0xc0, 0x38, 0xde, 0xc7, 0x7a, 0x87, 0xde, 0x81, 0xdd, 0x1f, 0x68, 0xc2, 0x5c, 0xbe, + 0x4e, 0x68, 0xba, 0x66, 0xc1, 0xca, 0x6e, 0x4a, 0xfd, 0x5d, 0x81, 0xce, 0x72, 0x50, 0x94, 0x28, + 0x69, 0xca, 0xb1, 0x96, 0x74, 0xcc, 0x14, 0x88, 0xf2, 0xeb, 0x18, 0xac, 0xf2, 0x67, 0xed, 0x56, + 0x5b, 0xe6, 0xd9, 0x2d, 0x48, 0xca, 0xab, 0x27, 0xd0, 0x8d, 0xa8, 0x47, 0xb8, 0xff, 0x92, 0xba, + 0x69, 0x4c, 0x22, 0xdb, 0x94, 0x9e, 0x0c, 0xae, 0xf3, 0x64, 0x1a, 0x93, 0x48, 0xfb, 0xb2, 0x93, + 0x07, 0x0b, 0x4c, 0x88, 0x2f, 0x92, 0xad, 0x68, 0xc0, 0x89, 0x0d, 0x83, 0xda, 0x31, 0xc2, 0xc5, + 0x11, 0x0f, 0x05, 0x78, 0x85, 0xa6, 0x0a, 0xe8, 0x0c, 0x6a, 0xa2, 0xc6, 0x1c, 0x55, 0x45, 0x3c, + 0x81, 0x6e, 0xcc, 0x52, 0xbf, 0x94, 0xb6, 0x73, 0x33, 0x69, 0x79, 0x70, 0x2e, 0xad, 0x48, 0xa6, + 0xa4, 0x75, 0x95, 0xb4, 0x1c, 0x2d, 0xa4, 0x15, 0x34, 0x25, 0x6d, 0x57, 0x49, 0xcb, 0x51, 0x29, + 0xcd, 0xf9, 0xcd, 0x80, 0xa6, 0x3a, 0x10, 0xbd, 0x0b, 0xd6, 0x32, 0x0b, 0xb3, 0x60, 0xb3, 0x1c, + 0xf5, 0x82, 0xf7, 0x4a, 0x5c, 0x15, 0x74, 0x17, 0xee, 0xbc, 0x4e, 0xbd, 0xf2, 0x92, 0x7b, 0xaf, + 0x05, 0xa8, 0x1b, 0xea, 0x43, 0x27, 0x8b, 0x63, 0x9a, 0xb8, 0x0b, 0x96, 
0x45, 0x2b, 0xfd, 0x9c, + 0x41, 0x42, 0xa7, 0x02, 0xb9, 0xd2, 0x0a, 0x6b, 0x37, 0x6b, 0x85, 0xce, 0x7d, 0x80, 0xd2, 0x38, + 0xf1, 0x28, 0xd9, 0xf9, 0x79, 0x4a, 0x55, 0x05, 0xfb, 0x58, 0xef, 0x04, 0x1e, 0xd0, 0xc8, 0xe3, + 0x6b, 0x79, 0x7a, 0x17, 0xeb, 0x9d, 0xf3, 0x93, 0x01, 0xed, 0x3c, 0x29, 0xfa, 0x0c, 0x1a, 0x81, + 0x98, 0x04, 0xb6, 0x21, 0xaf, 0xa9, 0x5f, 0xad, 0xa1, 0x18, 0x16, 0xfa, 0x96, 0x54, 0x4c, 0x75, + 0x87, 0x44, 0x9f, 0x82, 0x79, 0x93, 0x06, 0x5d, 0x92, 0x9d, 0x1f, 0x6b, 0xd0, 0x9c, 0xc8, 0xa9, + 0xf7, 0xff, 0x74, 0x7d, 0x00, 0x0d, 0x4f, 0xcc, 0x29, 0x3d, 0x63, 0xde, 0xac, 0x0e, 0x96, 0xa3, + 0x0c, 0x2b, 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x35, 0xba, 0xb4, 0xe4, 0xc3, 0xea, 0x20, 0x3d, 0xdf, + 0x70, 0xce, 0x16, 0x81, 0xa9, 0x1a, 0x07, 0xba, 0xeb, 0x6e, 0x09, 0xd4, 0x33, 0x03, 0xe7, 0x6c, + 0x11, 0x98, 0xa9, 0x7e, 0x2b, 0x9b, 0xc9, 0xd6, 0x40, 0xdd, 0x94, 0x71, 0xce, 0x46, 0x9f, 0x83, + 0xb9, 0xce, 0xdb, 0xb0, 0x6c, 0x22, 0x5b, 0xed, 0x29, 0xba, 0x35, 0x2e, 0x23, 0x44, 0xe3, 0x2e, + 0x1c, 0x77, 0xc3, 0x54, 0x76, 0xaa, 0x1a, 0xee, 0x14, 0xd8, 0x24, 0x75, 0x7e, 0x31, 0x60, 0x47, + 0xdd, 0xc3, 0x23, 0x12, 0xfa, 0xc1, 0x45, 0xe5, 0x27, 0x02, 0x82, 0xfa, 0x9a, 0x06, 0xb1, 0xfe, + 0x42, 0x90, 0x6b, 0x74, 0x17, 0xea, 0x42, 0xa3, 0xb4, 0x70, 0x77, 0xdb, 0x7f, 0x5e, 0x65, 0x9e, + 0x5d, 0xc4, 0x14, 0x4b, 0xb6, 0x68, 0xed, 0xea, 0x5b, 0xc7, 0xae, 0x5f, 0xd7, 0xda, 0x55, 0x5c, + 0xde, 0xda, 0x55, 0x84, 0x50, 0x91, 0x45, 0x3e, 0x97, 0x16, 0x9a, 0x58, 0xae, 0xdf, 0x5b, 0x00, + 0x94, 0x67, 0xa0, 0x0e, 0xb4, 0x1e, 0x3c, 0x9b, 0x3f, 0x9d, 0x9d, 0x61, 0xeb, 0x16, 0x32, 0xa1, + 0x31, 0x3e, 0x99, 0x8f, 0xcf, 0x2c, 0x43, 0xe0, 0xd3, 0xf9, 0x64, 0x72, 0x82, 0x5f, 0x58, 0xb7, + 0xc5, 0x66, 0xfe, 0x74, 0xf6, 0xe2, 0xf9, 0xd9, 0x43, 0xab, 0x86, 0xba, 0x60, 0x3e, 0xfe, 0x72, + 0x3a, 0x7b, 0x36, 0xc6, 0x27, 0x13, 0xab, 0x8e, 0xde, 0x80, 0x3d, 0x19, 0xe3, 0x96, 0x60, 0xe3, + 0xd4, 0x79, 0x75, 0x79, 0x64, 0xfc, 0x7e, 0x79, 0x64, 0xfc, 0x75, 0x79, 0x64, 0x7c, 0xd3, 0xf3, + 0x99, 0x5b, 0x0a, 0x76, 0x95, 0xe0, 0x45, 0x53, 0xbe, 0xf6, 0x8f, 0xfe, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x6d, 0x53, 0xc5, 0x1e, 0xdf, 0x09, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1755,6 +1764,13 @@ func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } if len(m.Metric) > 0 { for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { { @@ -2128,6 +2144,10 @@ func (m *MetricFamily) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -4074,6 +4094,38 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) diff --git a/prompb/io/prometheus/client/metrics.proto b/prompb/io/prometheus/client/metrics.proto index 13ac8dcb4..06dcc34af 100644 --- a/prompb/io/prometheus/client/metrics.proto +++ b/prompb/io/prometheus/client/metrics.proto @@ -153,4 +153,5 @@ message MetricFamily { string help = 2; MetricType type = 3; repeated Metric metric = 4 [(gogoproto.nullable) = false]; + string unit = 5; } diff --git a/promql/engine_test.go b/promql/engine_test.go index d6a10455a..9ab54dd16 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -140,7 +140,7 @@ func TestQueryTimeout(t *testing.T) { require.Error(t, res.Err, "expected timeout error but got none") var e ErrQueryTimeout - require.True(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err) + require.ErrorAs(t, res.Err, &e, "expected timeout error but got: %s", res.Err) } const errQueryCanceled = ErrQueryCanceled("test statement execution") @@ -239,14 +239,14 @@ func TestQueryError(t *testing.T) { res := vectorQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") - require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") + require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match") matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0)) require.NoError(t, err) res = matrixQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") - require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") + require.ErrorIs(t, res.Err, errStorage, "expected error doesn't match") } type noopHintRecordingQueryable struct { @@ -635,7 +635,7 @@ func TestEngineShutdown(t *testing.T) { require.Error(t, res2.Err, "expected error on querying with canceled context but got none") var e ErrQueryCanceled - require.True(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err) + require.ErrorAs(t, res2.Err, &e, "expected cancellation error but got: %s", res2.Err) } func TestEngineEvalStmtTimestamps(t *testing.T) { @@ -2057,7 +2057,7 @@ func TestQueryLogger_basic(t *testing.T) { l := len(f1.logs) queryExec() - require.Equal(t, 2*l, len(f1.logs)) + require.Len(t, f1.logs, 2*l) // Test that we close the query logger when unsetting it. 
require.False(t, f1.closed, "expected f1 to be open, got closed") @@ -3003,8 +3003,8 @@ func TestEngineOptsValidation(t *testing.T) { require.Equal(t, c.expError, err1) require.Equal(t, c.expError, err2) } else { - require.Nil(t, err1) - require.Nil(t, err2) + require.NoError(t, err1) + require.NoError(t, err2) } } } @@ -3267,7 +3267,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h, nil) } @@ -3287,7 +3287,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, h.ToFloat().Count, vector[0].F) + require.Equal(t, h.ToFloat(nil).Count, vector[0].F) } else { require.Equal(t, float64(h.Count), vector[0].F) } @@ -3305,7 +3305,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { require.Len(t, vector, 1) require.Nil(t, vector[0].H) if floatHisto { - require.Equal(t, h.ToFloat().Sum, vector[0].F) + require.Equal(t, h.ToFloat(nil).Sum, vector[0].F) } else { require.Equal(t, h.Sum, vector[0].F) } @@ -3433,7 +3433,7 @@ func TestNativeHistogram_HistogramStdDevVar(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, tc.h, nil) } @@ -3678,7 +3678,7 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) } @@ -4109,7 +4109,7 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) { app := storage.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) } @@ -4272,7 +4272,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { // Since we mutate h later, we need to create a copy here. var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } @@ -4282,7 +4282,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) // Since we mutate h later, we need to create a copy here. if floatHisto { - _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) } @@ -4530,7 +4530,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { // Since we mutate h later, we need to create a copy here. 
var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } @@ -4687,7 +4687,7 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) { // Since we mutate h later, we need to create a copy here. var err error if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) } else { _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) } diff --git a/promql/functions.go b/promql/functions.go index 06f6f8c71..407a11b50 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -609,6 +609,25 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod }), nil } +// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + if len(vals[0].(Matrix)[0].Floats) == 0 { + return enh.Out, nil + } + return aggrOverTime(vals, enh, func(s Series) float64 { + values := make(vectorByValueHeap, 0, len(s.Floats)) + for _, f := range s.Floats { + values = append(values, Sample{F: f.F}) + } + median := quantile(0.5, values) + values = make(vectorByValueHeap, 0, len(s.Floats)) + for _, f := range s.Floats { + values = append(values, Sample{F: math.Abs(f.F - median)}) + } + return quantile(0.5, values) + }), nil +} + // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if len(vals[0].(Matrix)[0].Floats) == 0 { @@ -1538,6 +1557,7 @@ var FunctionCalls = map[string]FunctionCall{ "log10": funcLog10, "log2": funcLog2, "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, "max_over_time": funcMaxOverTime, "min_over_time": funcMinOverTime, "minute": funcMinute, diff --git a/promql/functions_test.go b/promql/functions_test.go index faf6859e7..6d5c3784e 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -64,7 +64,7 @@ func TestDeriv(t *testing.T) { require.NoError(t, result.Err) vec, _ := result.Vector() - require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec)) + require.Len(t, vec, 1, "Expected 1 result, got %d", len(vec)) require.Equal(t, 0.0, vec[0].F, "Expected 0.0 as value, got %f", vec[0].F) } diff --git a/promql/parser/functions.go b/promql/parser/functions.go index ee2e90c55..46d50d547 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -254,6 +254,12 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "mad_over_time": { + Name: "mad_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Experimental: true, + }, "max_over_time": { Name: "max_over_time", ArgTypes: []ValueType{ValueTypeMatrix}, diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 38c8a39e6..6c26445e3 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3579,9 +3579,9 @@ func TestParseExpressions(t *testing.T) { require.True(t, ok, "unexpected error type") for _, e := range errorList { - require.True(t, 0 <= e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e) - require.True(t, 
e.PositionRange.Start <= e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e) - require.True(t, e.PositionRange.End <= posrange.Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e) + require.LessOrEqual(t, 0, e.PositionRange.Start, "parse error has negative position\nExpression '%s'\nError: %v", test.input, e) + require.LessOrEqual(t, e.PositionRange.Start, e.PositionRange.End, "parse error has negative length\nExpression '%s'\nError: %v", test.input, e) + require.LessOrEqual(t, e.PositionRange.End, posrange.Pos(len(test.input)), "parse error is not contained in input\nExpression '%s'\nError: %v", test.input, e) } } }) diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index ce55fecbb..4135753fd 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -123,7 +123,7 @@ func TestMMapFile(t *testing.T) { bytes := make([]byte, 4) n, err := f.Read(bytes) - require.Equal(t, n, 2) + require.Equal(t, 2, n) require.NoError(t, err, "Unexpected error while reading file.") require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed") diff --git a/promql/testdata/functions.test b/promql/testdata/functions.test index b5263a96f..b4547886a 100644 --- a/promql/testdata/functions.test +++ b/promql/testdata/functions.test @@ -739,6 +739,14 @@ eval instant at 1m stdvar_over_time(metric[1m]) eval instant at 1m stddev_over_time(metric[1m]) {} 0 +# Tests for mad_over_time. +clear +load 10s + metric 4 6 2 1 999 1 2 + +eval instant at 70s mad_over_time(metric[70s]) + {} 1 + # Tests for quantile_over_time clear diff --git a/rules/alerting_test.go b/rules/alerting_test.go index f8edcc767..dd324d1ee 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -185,7 +185,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) - require.Equal(t, 0, len(res)) + require.Empty(t, res) } func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { @@ -828,7 +828,7 @@ func TestKeepFiringFor(t *testing.T) { evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) - require.Equal(t, 0, len(res)) + require.Empty(t, res) } func TestPendingAndKeepFiringFor(t *testing.T) { @@ -880,7 +880,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { evalTime := baseTime.Add(time.Minute) res, err = rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) - require.Equal(t, 0, len(res)) + require.Empty(t, res) } // TestAlertingEvalWithOrigin checks that the alerting rule details are passed through the context. diff --git a/rules/manager_test.go b/rules/manager_test.go index 75ee34919..6418c5a37 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -167,7 +167,7 @@ func TestAlertingRule(t *testing.T) { filteredRes = append(filteredRes, smpl) } else { // If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'. - require.Equal(t, smplName, "ALERTS_FOR_STATE") + require.Equal(t, "ALERTS_FOR_STATE", smplName) } } for i := range test.result { @@ -313,7 +313,7 @@ func TestForStateAddSamples(t *testing.T) { filteredRes = append(filteredRes, smpl) } else { // If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'. 
- require.Equal(t, smplName, "ALERTS") + require.Equal(t, "ALERTS", smplName) } } for i := range test.result { @@ -471,12 +471,12 @@ func TestForStateRestore(t *testing.T) { // Checking if we have restored it correctly. switch { case tst.noRestore: - require.Equal(t, tst.num, len(got)) + require.Len(t, got, tst.num) for _, e := range got { require.Equal(t, e.ActiveAt, restoreTime) } case tst.gracePeriod: - require.Equal(t, tst.num, len(got)) + require.Len(t, got, tst.num) for _, e := range got { require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) } @@ -725,7 +725,7 @@ func TestUpdate(t *testing.T) { err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil) require.NoError(t, err) - require.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups") + require.NotEmpty(t, ruleManager.groups, "expected non-empty rule groups") ogs := map[string]*Group{} for h, g := range ruleManager.groups { g.seriesInPreviousEval = []map[string]labels.Labels{ @@ -746,7 +746,7 @@ func TestUpdate(t *testing.T) { // Groups will be recreated if updated. rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml") - require.Equal(t, 0, len(errs), "file parsing failures") + require.Empty(t, errs, "file parsing failures") tmpFile, err := os.CreateTemp("", "rules.test.*.yaml") require.NoError(t, err) @@ -889,20 +889,20 @@ func TestNotify(t *testing.T) { // Alert sent right away group.Eval(ctx, time.Unix(1, 0)) - require.Equal(t, 1, len(lastNotified)) + require.Len(t, lastNotified, 1) require.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero") // Alert is not sent 1s later group.Eval(ctx, time.Unix(2, 0)) - require.Equal(t, 0, len(lastNotified)) + require.Empty(t, lastNotified) // Alert is resent at t=5s group.Eval(ctx, time.Unix(5, 0)) - require.Equal(t, 1, len(lastNotified)) + require.Len(t, lastNotified, 1) // Resolution alert sent right away group.Eval(ctx, time.Unix(6, 0)) - require.Equal(t, 1, len(lastNotified)) + require.Len(t, lastNotified, 1) } func TestMetricsUpdate(t *testing.T) { @@ -1100,7 +1100,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) { require.NoError(t, err) ruleManager.Stop() stopped = true - require.True(t, time.Since(start) < 1*time.Second, "rule manager does not stop early") + require.Less(t, time.Since(start), 1*time.Second, "rule manager does not stop early") time.Sleep(5 * time.Second) require.Equal(t, 0, countStaleNaN(t, storage), "invalid count of staleness markers after stopping the engine") } @@ -1390,9 +1390,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { require.Equal(t, labels.FromStrings("__name__", "sum:histogram_metric"), s.Labels()) - expHist := hists[0].ToFloat() + expHist := hists[0].ToFloat(nil) for _, h := range hists[1:] { - expHist = expHist.Add(h.ToFloat()) + expHist = expHist.Add(h.ToFloat(nil)) } it := s.Iterator(nil) diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go index c580a5051..43ee0fcec 100644 --- a/scrape/helpers_test.go +++ b/scrape/helpers_test.go @@ -14,10 +14,18 @@ package scrape import ( + "bytes" "context" + "encoding/binary" "fmt" "math/rand" "strings" + "sync" + "testing" + + "github.com/gogo/protobuf/proto" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -50,6 +58,10 @@ func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.M return 0, nil } +func (a nopAppender) 
AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { + return 0, nil +} + func (a nopAppender) Commit() error { return nil } func (a nopAppender) Rollback() error { return nil } @@ -65,9 +77,19 @@ type histogramSample struct { fh *histogram.FloatHistogram } +type collectResultAppendable struct { + *collectResultAppender +} + +func (a *collectResultAppendable) Appender(_ context.Context) storage.Appender { + return a +} + // collectResultAppender records all samples that were added through the appender. // It can be used as its zero value or be backed by another appender it writes samples through. type collectResultAppender struct { + mtx sync.Mutex + next storage.Appender resultFloats []floatSample pendingFloats []floatSample @@ -82,6 +104,8 @@ type collectResultAppender struct { } func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingFloats = append(a.pendingFloats, floatSample{ metric: lset, t: t, @@ -103,6 +127,8 @@ func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels } func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingExemplars = append(a.pendingExemplars, e) if a.next == nil { return 0, nil @@ -112,6 +138,8 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L } func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) if a.next == nil { return 0, nil @@ -121,6 +149,8 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels. } func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + a.mtx.Lock() + defer a.mtx.Unlock() a.pendingMetadata = append(a.pendingMetadata, m) if ref == 0 { ref = storage.SeriesRef(rand.Uint64()) @@ -132,7 +162,13 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L return a.next.UpdateMetadata(ref, l, m) } +func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) { + return a.Append(ref, l, ct, 0.0) +} + func (a *collectResultAppender) Commit() error { + a.mtx.Lock() + defer a.mtx.Unlock() a.resultFloats = append(a.resultFloats, a.pendingFloats...) a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...) a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...) 
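The AppendCTZeroSample implementations above pin down the created-timestamp contract in its simplest form: the CT is materialized as a synthetic zero-valued sample at time ct (the test appender literally delegates to Append(ref, l, ct, 0.0)), appended ahead of the real sample at t. A minimal sketch of that ordering under the storage.Appender shapes used in this diff; the helper appendWithCT is hypothetical and not part of the change:

package example

import (
	"errors"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendWithCT is a hypothetical helper: it first appends the synthetic
// zero sample at the created timestamp ct, then the real sample at t.
func appendWithCT(app storage.Appender, lset labels.Labels, t, ct int64, v float64) error {
	if ct != 0 && ct < t {
		// An out-of-order CT is a common, expected case; the scrape loop
		// later in this diff ignores storage.ErrOutOfOrderCT the same way.
		if _, err := app.AppendCTZeroSample(0, lset, t, ct); err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
			return err
		}
	}
	// The real sample has priority if it collides with the zero sample.
	_, err := app.Append(0, lset, t, v)
	return err
}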
@@ -148,6 +184,8 @@ func (a *collectResultAppender) Commit() error { } func (a *collectResultAppender) Rollback() error { + a.mtx.Lock() + defer a.mtx.Unlock() a.rolledbackFloats = a.pendingFloats a.rolledbackHistograms = a.pendingHistograms a.pendingFloats = nil @@ -171,3 +209,22 @@ func (a *collectResultAppender) String() string { } return sb.String() } + +// protoMarshalDelimited marshals a MetricFamily into delimited +// Prometheus proto exposition format bytes (known as `encoding=delimited`). +// +// See also https://eli.thegreenplace.net/2011/08/02/length-prefix-framing-for-protocol-buffers +func protoMarshalDelimited(t *testing.T, mf *dto.MetricFamily) []byte { + t.Helper() + + protoBuf, err := proto.Marshal(mf) + require.NoError(t, err) + + varintBuf := make([]byte, binary.MaxVarintLen32) + varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + + buf := &bytes.Buffer{} + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + return buf.Bytes() +} diff --git a/scrape/manager.go b/scrape/manager.go index 3b70e48a1..faa46f54d 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -78,9 +78,15 @@ type Options struct { EnableMetadataStorage bool // Option to increase the interval used by scrape manager to throttle target group updates. DiscoveryReloadInterval model.Duration + // Option to enable the ingestion of the created timestamp as a synthetic zero sample. + // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md + EnableCreatedTimestampZeroIngestion bool // Optional HTTP client options to use when scraping. HTTPClientOptions []config_util.HTTPClientOption + + // private option for testability. + skipOffsetting bool } // Manager maintains a set of scrape pools and manages start/stop cycles diff --git a/scrape/manager_test.go b/scrape/manager_test.go index a689c469d..524424269 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -15,14 +15,23 @@ package scrape import ( "context" + "fmt" "net/http" + "net/http/httptest" + "net/url" + "os" "strconv" + "sync" "testing" "time" + "github.com/go-kit/log" + "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -30,6 +39,7 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/util/runutil" ) func TestPopulateLabels(t *testing.T) { @@ -714,3 +724,146 @@ scrape_configs: reload(scrapeManager, cfg2) require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) } + +// TestManagerCTZeroIngestion tests the scrape manager for CT cases. +func TestManagerCTZeroIngestion(t *testing.T) { + const mName = "expected_counter" + + for _, tc := range []struct { + name string + counterSample *dto.Counter + enableCTZeroIngestion bool + + expectedValues []float64 + }{ + { + name: "disabled with CT on counter", + counterSample: &dto.Counter{ + Value: proto.Float64(1.0), + // Timestamp does not matter as long as it exists in this test.
+ CreatedTimestamp: timestamppb.Now(), + }, + expectedValues: []float64{1.0}, + }, + { + name: "enabled with CT on counter", + counterSample: &dto.Counter{ + Value: proto.Float64(1.0), + // Timestamp does not matter as long as it exists in this test. + CreatedTimestamp: timestamppb.Now(), + }, + enableCTZeroIngestion: true, + expectedValues: []float64{0.0, 1.0}, + }, + { + name: "enabled without CT on counter", + counterSample: &dto.Counter{ + Value: proto.Float64(1.0), + }, + enableCTZeroIngestion: true, + expectedValues: []float64{1.0}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + app := &collectResultAppender{} + scrapeManager, err := NewManager( + &Options{ + EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, + skipOffsetting: true, + }, + log.NewLogfmtLogger(os.Stderr), + &collectResultAppendable{app}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + require.NoError(t, scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.GlobalConfig{ + // Disable regular scrapes. + ScrapeInterval: model.Duration(9999 * time.Minute), + ScrapeTimeout: model.Duration(5 * time.Second), + // Ensure the proto is chosen. We need proto as it's the only protocol + // with CT parsing support. + ScrapeProtocols: []config.ScrapeProtocol{config.PrometheusProto}, + }, + ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}}, + })) + + once := sync.Once{} + // Start a fake HTTP target that allows exactly one scrape. + server := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fail := true + once.Do(func() { + fail = false + w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) + + ctrType := dto.MetricType_COUNTER + w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ + Name: proto.String(mName), + Type: &ctrType, + Metric: []*dto.Metric{{Counter: tc.counterSample}}, + })) + }) + + if fail { + w.WriteHeader(http.StatusInternalServerError) + } + }), + ) + defer server.Close() + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + // Add a fake target directly into tsets + reload. Normally users would use + // Manager.Run and wait for the minimum 5s refresh interval. + scrapeManager.updateTsets(map[string][]*targetgroup.Group{ + "test": {{ + Targets: []model.LabelSet{{ + model.SchemeLabel: model.LabelValue(serverURL.Scheme), + model.AddressLabel: model.LabelValue(serverURL.Host), + }}, + }}, + }) + scrapeManager.reload() + + // Wait for one scrape.
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { + if countFloatSamples(app, mName) != len(tc.expectedValues) { + return fmt.Errorf("expected %v samples", tc.expectedValues) + } + return nil + }), "after 1 minute") + scrapeManager.Stop() + + require.Equal(t, tc.expectedValues, getResultFloats(app, mName)) + }) + } +} + +func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) { + a.mtx.Lock() + defer a.mtx.Unlock() + + for _, f := range a.resultFloats { + if f.metric.Get(model.MetricNameLabel) == expectedMetricName { + count++ + } + } + return count +} + +func getResultFloats(app *collectResultAppender, expectedMetricName string) (result []float64) { + app.mtx.Lock() + defer app.mtx.Unlock() + + for _, f := range app.resultFloats { + if f.metric.Get(model.MetricNameLabel) == expectedMetricName { + result = append(result, f.f) + } + } + return result +} diff --git a/scrape/scrape.go b/scrape/scrape.go index 9a0ba1d00..be27a5d48 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -106,9 +106,10 @@ type scrapeLoopOptions struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool - mrc []*relabel.Config - cache *scrapeCache - enableCompression bool + + mrc []*relabel.Config + cache *scrapeCache + enableCompression bool } const maxAheadTime = 10 * time.Minute @@ -168,11 +169,13 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.interval, opts.timeout, opts.scrapeClassicHistograms, + options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, options.EnableMetadataStorage, opts.target, options.PassMetadataInContext, metrics, + options.skipOffsetting, ) } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -787,6 +790,7 @@ type scrapeLoop struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool + enableCTZeroIngestion bool appender func(ctx context.Context) storage.Appender sampleMutator labelsMutator @@ -804,6 +808,8 @@ type scrapeLoop struct { appendMetadataToWAL bool metrics *scrapeMetrics + + skipOffsetting bool // For testability. } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -1076,11 +1082,13 @@ func newScrapeLoop(ctx context.Context, interval time.Duration, timeout time.Duration, scrapeClassicHistograms bool, + enableCTZeroIngestion bool, reportExtraMetrics bool, appendMetadataToWAL bool, target *Target, passMetadataInContext bool, metrics *scrapeMetrics, + skipOffsetting bool, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1124,9 +1132,11 @@ func newScrapeLoop(ctx context.Context, interval: interval, timeout: timeout, scrapeClassicHistograms: scrapeClassicHistograms, + enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, appendMetadataToWAL: appendMetadataToWAL, metrics: metrics, + skipOffsetting: skipOffsetting, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1134,12 +1144,14 @@ func newScrapeLoop(ctx context.Context, } func (sl *scrapeLoop) run(errc chan<- error) { - select { - case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): - // Continue after a scraping offset. - case <-sl.ctx.Done(): - close(sl.stopped) - return + if !sl.skipOffsetting { + select { + case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)): + // Continue after a scraping offset. 
+ case <-sl.ctx.Done(): + close(sl.stopped) + return + } } var last time.Time @@ -1557,6 +1569,15 @@ loop: updateMetadata(lset, true) } + if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. + // CT is an experimental feature. For now, we don't need to fail the + // scrape on errors updating the created timestamp; we only log at debug level. + level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + } + } + if isHistogram { if h != nil { ref, err = app.AppendHistogram(ref, lset, t, h, nil) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a9719f9a0..90578f2e9 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -111,14 +111,14 @@ func TestDroppedTargetsList(t *testing.T) { ) sp.Sync(tgs) sp.Sync(tgs) - require.Equal(t, expectedLength, len(sp.droppedTargets)) + require.Len(t, sp.droppedTargets, expectedLength) require.Equal(t, expectedLength, sp.droppedTargetsCount) require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String()) // Check that count is still correct when we don't retain all dropped targets. sp.config.KeepDroppedTargets = 1 sp.Sync(tgs) - require.Equal(t, 1, len(sp.droppedTargets)) + require.Len(t, sp.droppedTargets, 1) require.Equal(t, expectedLength, sp.droppedTargetsCount) } @@ -242,11 +242,11 @@ func TestScrapePoolStop(t *testing.T) { } mtx.Lock() - require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops") + require.Len(t, stopped, numTargets, "Unexpected number of stopped loops") mtx.Unlock() - require.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets)) - require.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops)) + require.Empty(t, sp.activeTargets, "Targets were not cleared on stopping: %d left", len(sp.activeTargets)) + require.Empty(t, sp.loops, "Loops were not cleared on stopping: %d left", len(sp.loops)) } func TestScrapePoolReload(t *testing.T) { @@ -333,11 +333,11 @@ func TestScrapePoolReload(t *testing.T) { } mtx.Lock() - require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops") + require.Len(t, stopped, numTargets, "Unexpected number of stopped loops") mtx.Unlock() require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly") - require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload") + require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload") } func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) { @@ -437,10 +437,10 @@ func TestScrapePoolTargetLimit(t *testing.T) { for _, l := range sp.loops { lerr := l.(*testLoop).getForcedError() if shouldErr { - require.NotNil(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) + require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error()) } else { - require.Equal(t, nil, lerr) + require.NoError(t, lerr) } } } @@ -582,8 +582,8 @@ func TestScrapePoolRaces(t *testing.T) { dropped := sp.DroppedTargets() expectedActive, expectedDropped := len(tgts[0].Targets), 0 - require.Equal(t, 
expectedActive, len(active), "Invalid number of active targets") - require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets") + require.Len(t, active, expectedActive, "Invalid number of active targets") + require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets") for i := 0; i < 20; i++ { time.Sleep(10 * time.Millisecond) @@ -633,7 +633,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) { })) sp.Sync(tgs) - require.Equal(t, 1, len(sp.loops)) + require.Len(t, sp.loops, 1) wg.Wait() for _, l := range sp.loops { @@ -660,9 +660,11 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app false, false, false, + false, nil, false, newTestScrapeMetrics(t), + false, ) } @@ -801,9 +803,11 @@ func TestScrapeLoopRun(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, + false, ) // The loop must terminate during the initial offset if the context @@ -945,9 +949,11 @@ func TestScrapeLoopMetadata(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, + false, ) defer cancel() @@ -1123,7 +1129,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) @@ -1170,7 +1176,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 17, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 17, "Appended samples not as expected:\n%s", appender) require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) @@ -1237,7 +1243,7 @@ func TestScrapeLoopCache(t *testing.T) { // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. 
- require.Equal(t, 26, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 26, "Appended samples not as expected:\n%s", appender) } func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { @@ -2377,7 +2383,7 @@ func TestTargetScraperScrapeOK(t *testing.T) { runTest(acceptHeader(config.DefaultScrapeProtocols)) protobufParsing = true - runTest(acceptHeader(config.DefaultNativeHistogramScrapeProtocols)) + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols)) } func TestTargetScrapeScrapeCancel(t *testing.T) { @@ -2529,7 +2535,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, &buf) require.NoError(t, err) - require.Equal(t, len(responseBody), buf.Len()) + require.Len(t, responseBody, buf.Len()) // Target response gzip compressed body, scrape without body size limit. gzipResponse = true buf.Reset() @@ -2537,7 +2543,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { require.NoError(t, err) _, err = ts.readResponse(context.Background(), resp, &buf) require.NoError(t, err) - require.Equal(t, len(responseBody), buf.Len()) + require.Len(t, responseBody, buf.Len()) } // testScraper implements the scraper interface and allows setting values @@ -2642,7 +2648,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { q, err := s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) - require.Equal(t, false, series.Next(), "series found in tsdb") + require.False(t, series.Next(), "series found in tsdb") require.NoError(t, series.Err()) // We add a good metric to check that it is recorded. 
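Stepping back from the individual hunks: the assertion rewrites running through this file (and the earlier test files) follow one mechanical pattern: require.Equal calls that compare against a length, emptiness, or a boolean literal become the dedicated testify helpers, whose failure messages name the collection or condition instead of printing bare values. An illustrative before/after, condensed from the hunks above rather than copied from any single one:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAssertionStyle is a hypothetical test showing the rewrite pattern.
func TestAssertionStyle(t *testing.T) {
	loops := map[uint64]struct{}{1: {}}

	// Old style: generic equality, failures print bare numbers and booleans.
	require.Equal(t, 1, len(loops))
	require.Equal(t, false, len(loops) == 0)

	// New style: intent-revealing helpers with clearer failure output.
	require.Len(t, loops, 1)
	require.False(t, len(loops) == 0)
	require.NotEmpty(t, loops)
}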
@@ -2654,9 +2660,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { q, err = s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500")) - require.Equal(t, true, series.Next(), "series not found in tsdb") + require.True(t, series.Next(), "series not found in tsdb") require.NoError(t, series.Err()) - require.Equal(t, false, series.Next(), "more than one series found in tsdb") + require.False(t, series.Next(), "more than one series found in tsdb") } func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { @@ -2684,7 +2690,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { q, err := s.Querier(time.Time{}.UnixNano(), 0) require.NoError(t, err) series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) - require.Equal(t, false, series.Next(), "series found in tsdb") + require.False(t, series.Next(), "series found in tsdb") require.NoError(t, series.Err()) } @@ -2744,14 +2750,14 @@ func TestReusableConfig(t *testing.T) { } for i, m := range match { - require.Equal(t, true, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i) - require.Equal(t, true, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i) - require.Equal(t, true, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i) - require.Equal(t, true, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i) + require.True(t, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i) + require.True(t, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i) + require.True(t, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i) + require.True(t, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i) } for i, m := range noMatch { - require.Equal(t, false, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i) - require.Equal(t, false, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i) + require.False(t, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i) + require.False(t, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i) } } @@ -3317,7 +3323,7 @@ test_summary_count 199 Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, }, }) - require.Equal(t, 1, len(sp.ActiveTargets())) + require.Len(t, sp.ActiveTargets(), 1) select { case <-time.After(5 * time.Second): @@ -3394,7 +3400,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for // each scrape successful or not. - require.Equal(t, 27, len(appender.resultFloats), "Appended samples not as expected:\n%s", appender) + require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender) require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected") require.True(t, value.IsStaleNaN(appender.resultFloats[6].f), "Appended second sample not as expected. 
Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f)) @@ -3449,7 +3455,7 @@ func TestScrapeLoopCompression(t *testing.T) { Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}}, }, }) - require.Equal(t, 1, len(sp.ActiveTargets())) + require.Len(t, sp.ActiveTargets(), 1) select { case <-time.After(5 * time.Second): diff --git a/scrape/target_test.go b/scrape/target_test.go index 6631f328c..f37c75a76 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -565,7 +565,7 @@ func TestBucketLimitAppender(t *testing.T) { lbls := labels.FromStrings("__name__", "sparse_histogram_series") var err error if floatHisto { - fh := c.h.Copy().ToFloat() + fh := c.h.Copy().ToFloat(nil) _, err = app.AppendHistogram(0, lbls, ts, nil, fh) if c.expectError { require.Error(t, err) diff --git a/scripts/check-go-mod-version.sh b/scripts/check-go-mod-version.sh new file mode 100755 index 000000000..d651a6203 --- /dev/null +++ b/scripts/check-go-mod-version.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +readarray -t mod_files < <(find . -type f -name go.mod) + +echo "Checking files ${mod_files[@]}" + +matches=$(awk '$1 == "go" {print $2}' "${mod_files[@]}" | sort -u | wc -l) + +if [[ "${matches}" -ne 1 ]]; then + echo 'Not all go.mod files have matching go versions' + exit 1 +fi diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index ffa6b3090..805c59fb7 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -29,4 +29,4 @@ jobs: - name: Lint uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 with: - version: v1.54.2 + version: v1.55.2 diff --git a/storage/buffer.go b/storage/buffer.go index d2d89e042..f686796ca 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -202,7 +202,7 @@ func (s hSample) H() *histogram.Histogram { } func (s hSample) FH() *histogram.FloatHistogram { - return s.h.ToFloat() + return s.h.ToFloat(nil) } func (s hSample) Type() chunkenc.ValueType { @@ -376,7 +376,7 @@ func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) { func (it *sampleRingIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { if it.fh == nil { - return it.t, it.h.ToFloat() + return it.t, it.h.ToFloat(nil) } return it.t, it.fh } diff --git a/storage/buffer_test.go b/storage/buffer_test.go index c2542f3d9..84f94a008 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -233,7 +233,7 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(2) it := NewBufferIterator(NewListSeriesIterator(samples{ - fhSample{t: 1, fh: histograms[0].ToFloat()}, + fhSample{t: 1, fh: histograms[0].ToFloat(nil)}, hSample{t: 2, h: histograms[1]}, }), 2) @@ -244,11 +244,11 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) { require.Equal(t, chunkenc.ValFloatHistogram, buf.Next()) _, fh := buf.AtFloatHistogram() - require.Equal(t, histograms[0].ToFloat(), fh) + require.Equal(t, histograms[0].ToFloat(nil), fh) require.Equal(t, chunkenc.ValHistogram, buf.Next()) _, fh = buf.AtFloatHistogram() - require.Equal(t, histograms[1].ToFloat(), fh) + require.Equal(t, histograms[1].ToFloat(nil), fh) } func BenchmarkBufferedSeriesIterator(b *testing.B) { diff --git a/storage/fanout.go b/storage/fanout.go index 33257046f..a9a3f904b 100644 --- a/storage/fanout.go +++ b/storage/fanout.go @@ -202,6 +202,20 @@ func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metada return ref, nil } +func (f *fanoutAppender) 
AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error) { + ref, err := f.primary.AppendCTZeroSample(ref, l, t, ct) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendCTZeroSample(ref, l, t, ct); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) Commit() (err error) { err = f.primary.Commit() diff --git a/storage/fanout_test.go b/storage/fanout_test.go index a99c2f803..913e2fe24 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -100,7 +100,7 @@ func TestFanout_SelectSorted(t *testing.T) { } require.Equal(t, labelsResult, outputLabel) - require.Equal(t, inputTotalSize, len(result)) + require.Len(t, result, inputTotalSize) }) t.Run("chunk querier", func(t *testing.T) { querier, err := fanoutStorage.ChunkQuerier(0, 8000) @@ -128,7 +128,7 @@ func TestFanout_SelectSorted(t *testing.T) { require.NoError(t, seriesSet.Err()) require.Equal(t, labelsResult, outputLabel) - require.Equal(t, inputTotalSize, len(result)) + require.Len(t, result, inputTotalSize) }) } @@ -178,7 +178,7 @@ func TestFanoutErrors(t *testing.T) { } if tc.warning != nil { - require.Greater(t, len(ss.Warnings()), 0, "warnings expected") + require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() require.Error(t, w.AsErrors()[0]) require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0]) @@ -204,7 +204,7 @@ func TestFanoutErrors(t *testing.T) { } if tc.warning != nil { - require.Greater(t, len(ss.Warnings()), 0, "warnings expected") + require.NotEmpty(t, ss.Warnings(), "warnings expected") w := ss.Warnings() require.Error(t, w.AsErrors()[0]) require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0]) diff --git a/storage/interface.go b/storage/interface.go index 2b1b6a63e..675e44c0e 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -43,6 +43,13 @@ var ( ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") + + // ErrOutOfOrderCT indicates a failed append of a CT to the storage + // due to the CT being older than the newest sample. + // NOTE(bwplotka): This can be either an instrumentation failure or commonly expected + // behaviour, and we currently don't have a way to determine this. As a result + // it's recommended to ignore this error for now. + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -237,6 +244,7 @@ type Appender interface { ExemplarAppender HistogramAppender MetadataUpdater + CreatedTimestampAppender } // GetRef is an extra interface on Appenders used by downstream projects @@ -294,6 +302,24 @@ type MetadataUpdater interface { UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) } +// CreatedTimestampAppender provides an interface for appending CT to storage. +type CreatedTimestampAppender interface { + // AppendCTZeroSample adds a synthetic zero sample for the given ct timestamp, + // which will be associated with the given series and labels and with the incoming + // sample's t (timestamp). AppendCTZeroSample returns an error if the zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // the incoming sample (the sample has priority). 
+ // + // AppendCTZeroSample has to be called before the corresponding sample Append. + // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64) (SeriesRef, error) +} + // SeriesSet contains a set of series. type SeriesSet interface { Next() bool diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index 479111380..d1cd56517 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -68,26 +68,26 @@ func TestMemoizedSeriesIterator(t *testing.T) { fSample{t: 400, f: 12}, }), 2) - require.Equal(t, it.Seek(-123), chunkenc.ValFloat, "seek failed") + require.Equal(t, chunkenc.ValFloat, it.Seek(-123), "seek failed") sampleEq(1, 2, nil) prevSampleEq(0, 0, nil, false) - require.Equal(t, it.Seek(5), chunkenc.ValFloat, "seek failed") + require.Equal(t, chunkenc.ValFloat, it.Seek(5), "seek failed") sampleEq(5, 6, nil) prevSampleEq(4, 5, nil, true) // Seek to a histogram sample with a previous float sample. - require.Equal(t, it.Seek(102), chunkenc.ValFloatHistogram, "seek failed") + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(102), "seek failed") sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) prevSampleEq(101, 10, nil, true) // Attempt to seek backwards (no-op). - require.Equal(t, it.Seek(50), chunkenc.ValFloatHistogram, "seek failed") + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(50), "seek failed") sampleEq(102, 10, tsdbutil.GenerateTestFloatHistogram(0)) prevSampleEq(101, 10, nil, true) // Seek to a float histogram sample with a previous histogram sample. 
- require.Equal(t, it.Seek(104), chunkenc.ValFloatHistogram, "seek failed") + require.Equal(t, chunkenc.ValFloatHistogram, it.Seek(104), "seek failed") sampleEq(104, 0, tsdbutil.GenerateTestFloatHistogram(2)) prevSampleEq(103, 0, tsdbutil.GenerateTestFloatHistogram(1), true) @@ -101,7 +101,7 @@ func TestMemoizedSeriesIterator(t *testing.T) { sampleEq(400, 12, nil) prevSampleEq(399, 0, tsdbutil.GenerateTestFloatHistogram(6), true) - require.Equal(t, it.Seek(1024), chunkenc.ValNone, "seek succeeded unexpectedly") + require.Equal(t, chunkenc.ValNone, it.Seek(1024), "seek succeeded unexpectedly") } func BenchmarkMemoizedSeriesIterator(b *testing.B) { diff --git a/storage/merge_test.go b/storage/merge_test.go index 25c8fa4a8..02c2a3409 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1547,7 +1547,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { } require.Subset(t, tcase.expectedWarnings, res.Warnings()) require.Equal(t, tcase.expectedErrs[0], res.Err()) - require.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match") + require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match") require.Equal(t, tcase.expectedSelectsSeries, lbls) for _, qr := range q.queriers { @@ -1563,7 +1563,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { t.Run("LabelNames", func(t *testing.T) { res, w, err := q.LabelNames(ctx) require.Subset(t, tcase.expectedWarnings, w) - require.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match") + require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) if err != nil { @@ -1578,7 +1578,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { t.Run("LabelValues", func(t *testing.T) { res, w, err := q.LabelValues(ctx, "test") require.Subset(t, tcase.expectedWarnings, w) - require.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match") + require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) if err != nil { @@ -1594,7 +1594,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") res, w, err := q.LabelValues(ctx, "test2", matcher) require.Subset(t, tcase.expectedWarnings, w) - require.True(t, errors.Is(err, tcase.expectedErrs[3]), "expected error doesn't match") + require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) if err != nil { diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go index ad7ba63a0..5eed2c0b1 100644 --- a/storage/remote/azuread/azuread_test.go +++ b/storage/remote/azuread/azuread_test.go @@ -100,7 +100,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil) tokenProvider, err := newTokenProvider(c.cfg, ad.mockCredential) - ad.Assert().NoError(err) + ad.Require().NoError(err) rt := &azureADRoundTripper{ next: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { @@ -113,15 +113,15 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() { cli := &http.Client{Transport: rt} req, err := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("Hello, world!")) - ad.Assert().NoError(err) + 
ad.Require().NoError(err) _, err = cli.Do(req) - ad.Assert().NoError(err) - ad.Assert().NotNil(gotReq) + ad.Require().NoError(err) + ad.NotNil(gotReq) origReq := gotReq - ad.Assert().NotEmpty(origReq.Header.Get("Authorization")) - ad.Assert().Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) + ad.NotEmpty(origReq.Header.Get("Authorization")) + ad.Equal("Bearer "+testTokenString, origReq.Header.Get("Authorization")) } } @@ -258,9 +258,9 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() { if c.err != "" { actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) - s.Assert().Nil(actualTokenProvider) - s.Assert().NotNil(actualErr) - s.Assert().ErrorContains(actualErr, c.err) + s.Nil(actualTokenProvider) + s.Require().Error(actualErr) + s.Require().ErrorContains(actualErr, c.err) } else { testToken := &azcore.AccessToken{ Token: testTokenString, @@ -272,21 +272,21 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() { actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) - s.Assert().NotNil(actualTokenProvider) - s.Assert().Nil(actualErr) - s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + s.NotNil(actualTokenProvider) + s.Require().NoError(actualErr) + s.NotNil(actualTokenProvider.getAccessToken(context.Background())) // Token set to refresh at half of the expiry time. The test tokens are set to expire in 5s. // Hence the 4-second wait to check if the token is refreshed. time.Sleep(4 * time.Second) - s.Assert().NotNil(actualTokenProvider.getAccessToken(context.Background())) + s.NotNil(actualTokenProvider.getAccessToken(context.Background())) s.mockCredential.AssertNumberOfCalls(s.T(), "GetToken", 2*mockGetTokenCallCounter) mockGetTokenCallCounter++ accessToken, err := actualTokenProvider.getAccessToken(context.Background()) - s.Assert().Nil(err) - s.Assert().NotEqual(accessToken, testTokenString) + s.Require().NoError(err) + s.NotEqual(testTokenString, accessToken) } } } diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go index 49a55779f..7c3993ca6 100644 --- a/storage/remote/chunked_test.go +++ b/storage/remote/chunked_test.go @@ -46,7 +46,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) { for _, msg := range msgs { n, err := w.Write(msg) require.NoError(t, err) - require.Equal(t, len(msg), n) + require.Len(t, msg, n) } i := 0 diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 585bdfd88..d2a7d45be 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -57,7 +57,7 @@ var writeRequestFixture = &prompb.WriteRequest{ }, Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}}, - Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())}, + Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))}, }, { Labels: []prompb.Label{ @@ -69,7 +69,7 @@ var writeRequestFixture = &prompb.WriteRequest{ }, Samples: []prompb.Sample{{Value: 2, Timestamp: 1}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}}, - Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat())}, + Histograms:
[]prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat(nil))}, }, }, } @@ -755,7 +755,7 @@ func TestStreamResponse(t *testing.T) { maxBytesInFrame, &sync.Pool{}) require.Nil(t, warning) - require.Nil(t, err) + require.NoError(t, err) expectData := []*prompb.ChunkedSeries{{ Labels: lbs1, Chunks: []prompb.Chunk{chunk, chunk}, diff --git a/storage/remote/intern_test.go b/storage/remote/intern_test.go index 84a74f32c..917c42e91 100644 --- a/storage/remote/intern_test.go +++ b/storage/remote/intern_test.go @@ -32,7 +32,7 @@ func TestIntern(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) } @@ -43,13 +43,13 @@ func TestIntern_MultiRef(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) interner.intern(testString) interned, ok = interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load())) } @@ -60,12 +60,12 @@ func TestIntern_DeleteRef(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) interner.release(testString) _, ok = interner.pool[testString] - require.Equal(t, false, ok) + require.False(t, ok) } func TestIntern_MultiRef_Concurrent(t *testing.T) { @@ -74,7 +74,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { interner.intern(testString) interned, ok := interner.pool[testString] - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) go interner.release(testString) @@ -86,6 +86,6 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { interner.mtx.RLock() interned, ok = interner.pool[testString] interner.mtx.RUnlock() - require.Equal(t, true, ok) + require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load())) } diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go index bc841a014..cd664bc8b 100644 --- a/storage/remote/metadata_watcher_test.go +++ b/storage/remote/metadata_watcher_test.go @@ -94,7 +94,7 @@ func TestWatchScrapeManager_NotReady(t *testing.T) { } mw := NewMetadataWatcher(nil, smm, "", wt, interval, deadline) - require.Equal(t, false, mw.ready()) + require.False(t, mw.ready()) mw.collect() diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index a141df348..c878c750b 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -188,7 +188,7 @@ func TestMetadataDelivery(t *testing.T) { m.AppendMetadata(context.Background(), metadata) - require.Equal(t, numMetadata, len(c.receivedMetadata)) + require.Len(t, c.receivedMetadata, numMetadata) // One more write than the rounded quotient should be performed in order to get samples that didn't // fit into
MaxSamplesPerSend. require.Equal(t, numMetadata/mcfg.MaxSamplesPerSend+1, c.writesReceived) @@ -318,9 +318,9 @@ func TestSeriesReset(t *testing.T) { } m.StoreSeries(series, i) } - require.Equal(t, numSegments*numSeries, len(m.seriesLabels)) + require.Len(t, m.seriesLabels, numSegments*numSeries) m.SeriesReset(2) - require.Equal(t, numSegments*numSeries/2, len(m.seriesLabels)) + require.Len(t, m.seriesLabels, numSegments*numSeries/2) } func TestReshard(t *testing.T) { @@ -619,7 +619,7 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record. fh := record.RefFloatHistogramSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), - FH: hist.ToFloat(), + FH: hist.ToFloat(nil), } floatHistograms = append(floatHistograms, fh) } else { @@ -1288,7 +1288,7 @@ func TestQueueManagerMetrics(t *testing.T) { // Make sure metrics pass linting. problems, err := client_testutil.GatherAndLint(reg) require.NoError(t, err) - require.Equal(t, 0, len(problems), "Metric linting problems detected: %v", problems) + require.Empty(t, problems, "Metric linting problems detected: %v", problems) // Make sure all metrics were unregistered. A failure here means you need to // unregister a metric in `queueManagerMetrics.unregister()`. diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index fdb9f04dd..e83a0cb21 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -97,7 +97,7 @@ func TestSampledReadEndpoint(t *testing.T) { err = proto.Unmarshal(uncompressed, &resp) require.NoError(t, err) - require.Equal(t, 2, len(resp.Results), "Expected 2 results.") + require.Len(t, resp.Results, 2, "Expected 2 results.") require.Equal(t, &prompb.QueryResult{ Timeseries: []*prompb.TimeSeries{ @@ -191,7 +191,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) { results = append(results, res) } - require.Equal(b, 6, len(results), "Expected 6 results.") + require.Len(b, results, 6, "Expected 6 results.") } } @@ -291,7 +291,7 @@ func TestStreamReadEndpoint(t *testing.T) { results = append(results, res) } - require.Equal(t, 6, len(results), "Expected 6 results.") + require.Len(t, results, 6, "Expected 6 results.") require.Equal(t, []*prompb.ChunkedReadResponse{ { diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index 54d4825f6..931bacf05 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -186,7 +186,7 @@ func TestSeriesSetFilter(t *testing.T) { filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove) act, ws, err := ToQueryResult(filtered, 1e6) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, tc.expected, act) } } diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go index 040a23a5a..a62cd2da3 100644 --- a/storage/remote/storage_test.go +++ b/storage/remote/storage_test.go @@ -44,10 +44,10 @@ func TestStorageLifecycle(t *testing.T) { require.NoError(t, s.ApplyConfig(conf)) // make sure remote write has a queue. - require.Equal(t, 1, len(s.rws.queues)) + require.Len(t, s.rws.queues, 1) // make sure remote read has a queryable.
- require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) err := s.Close() require.NoError(t, err) @@ -62,13 +62,13 @@ func TestUpdateRemoteReadConfigs(t *testing.T) { GlobalConfig: config.GlobalConfig{}, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 0, len(s.queryables)) + require.Empty(t, s.queryables) conf.RemoteReadConfigs = []*config.RemoteReadConfig{ baseRemoteReadConfig("http://test-storage.com"), } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) err := s.Close() require.NoError(t, err) @@ -85,14 +85,14 @@ func TestFilterExternalLabels(t *testing.T) { }, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 0, len(s.queryables)) + require.Empty(t, s.queryables) conf.RemoteReadConfigs = []*config.RemoteReadConfig{ baseRemoteReadConfig("http://test-storage.com"), } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) require.Equal(t, 1, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len()) err := s.Close() @@ -110,7 +110,7 @@ func TestIgnoreExternalLabels(t *testing.T) { }, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 0, len(s.queryables)) + require.Empty(t, s.queryables) conf.RemoteReadConfigs = []*config.RemoteReadConfig{ baseRemoteReadConfig("http://test-storage.com"), @@ -119,7 +119,7 @@ conf.RemoteReadConfigs[0].FilterExternalLabels = false require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queryables)) + require.Len(t, s.queryables, 1) require.Equal(t, 0, s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels.Len()) err := s.Close() diff --git a/storage/remote/write.go b/storage/remote/write.go index 237f8caa9..66455cb4d 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -303,6 +303,11 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { + // AppendCTZeroSample is a no-op for remote-write for now. + return 0, nil +} + // Commit implements storage.Appender.
func (t *timestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms) diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 27d0e9fab..fd5b34ecd 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -134,7 +134,7 @@ func TestOutOfOrderExemplar(t *testing.T) { func TestOutOfOrderHistogram(t *testing.T) { buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, - Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())}, + Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))}, }}, nil, nil, nil) require.NoError(t, err) @@ -228,7 +228,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) require.Equal(b, http.StatusNoContent, recorder.Code) - require.Equal(b, db.Head().NumSeries(), uint64(1000)) + require.Equal(b, uint64(1000), db.Head().NumSeries()) var bufRequests [][]byte for i := 0; i < 100; i++ { @@ -245,7 +245,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { recorder = httptest.NewRecorder() handler.ServeHTTP(recorder, req) require.Equal(b, http.StatusNoContent, recorder.Code) - require.Equal(b, uint64(1000), db.Head().NumSeries()) + require.Equal(b, uint64(1000), db.Head().NumSeries()) } } @@ -339,3 +339,8 @@ func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ // UpdateMetadata is a no-op for remote write (where mockAppendable is used for testing) for now. return 0, nil } + +func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { + // AppendCTZeroSample is a no-op for remote-write for now.
+ return 0, nil +} diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 824e319c2..4a9a3bafc 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -212,7 +212,7 @@ func TestWriteStorageLifecycle(t *testing.T) { }, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) err := s.Close() require.NoError(t, err) @@ -233,14 +233,14 @@ func TestUpdateExternalLabels(t *testing.T) { hash, err := toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) - require.Equal(t, 0, len(s.queues[hash].externalLabels)) + require.Len(t, s.queues, 1) + require.Empty(t, s.queues[hash].externalLabels) conf.GlobalConfig.ExternalLabels = externalLabels hash, err = toHash(conf.RemoteWriteConfigs[0]) require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels) err = s.Close() @@ -262,10 +262,10 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) { require.NoError(t, err) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 1, len(s.queues)) + require.Len(t, s.queues, 1) _, hashExists := s.queues[hash] require.True(t, hashExists, "Queue pointer should have remained the same") @@ -312,7 +312,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { } } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 3, len(s.queues)) + require.Len(t, s.queues, 3) hashes := make([]string, len(conf.RemoteWriteConfigs)) queues := make([]*QueueManager, len(conf.RemoteWriteConfigs)) @@ -334,7 +334,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { RemoteWriteConfigs: []*config.RemoteWriteConfig{c0, c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 3, len(s.queues)) + require.Len(t, s.queues, 3) _, hashExists := s.queues[hashes[0]] require.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.") @@ -350,7 +350,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { c1.HTTPClientConfig.BearerToken = "bar" err := s.ApplyConfig(conf) require.NoError(t, err) - require.Equal(t, 3, len(s.queues)) + require.Len(t, s.queues, 3) _, hashExists = s.queues[hashes[0]] require.True(t, hashExists, "Pointer of unchanged queue should have remained the same") @@ -367,7 +367,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { RemoteWriteConfigs: []*config.RemoteWriteConfig{c1, c2}, } require.NoError(t, s.ApplyConfig(conf)) - require.Equal(t, 2, len(s.queues)) + require.Len(t, s.queues, 2) _, hashExists = s.queues[hashes[0]] require.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.") @@ -399,9 +399,9 @@ func TestOTLPWriteHandler(t *testing.T) { resp := recorder.Result() require.Equal(t, http.StatusOK, resp.StatusCode) - require.Equal(t, 12, len(appendable.samples)) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count) - require.Equal(t, 1, len(appendable.histograms)) // 1 (exponential histogram) - require.Equal(t, 1, len(appendable.exemplars)) // 1 (exemplar) + require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 
7 (hist_bucket) + 2 (hist_sum, hist_count) + require.Len(t, appendable.histograms, 1) // 1 (exponential histogram) + require.Len(t, appendable.exemplars, 1) // 1 (exemplar) } func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest { diff --git a/storage/series_test.go b/storage/series_test.go index ed1e92dd2..6995468b4 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -118,7 +118,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { j := 0 for iter.Next() == chunkenc.ValFloat { ts, v := iter.At() - require.EqualValues(t, series[i].samples[j], fSample{t: ts, f: v}) + require.EqualValues(t, fSample{t: ts, f: v}, series[i].samples[j]) j++ } } diff --git a/tracing/tracing.go b/tracing/tracing.go index 6ff7dd4c0..d17540a91 100644 --- a/tracing/tracing.go +++ b/tracing/tracing.go @@ -32,6 +32,7 @@ import ( tracesdk "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "google.golang.org/grpc/credentials" "github.com/prometheus/prometheus/config" @@ -87,7 +88,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if cfg.TracingConfig.Endpoint == "" { m.config = cfg.TracingConfig m.shutdownFunc = nil - otel.SetTracerProvider(trace.NewNoopTracerProvider()) + otel.SetTracerProvider(noop.NewTracerProvider()) level.Info(m.logger).Log("msg", "Tracing provider uninstalled.") return nil } diff --git a/tracing/tracing_test.go b/tracing/tracing_test.go index 7f0b5d62e..b7996c610 100644 --- a/tracing/tracing_test.go +++ b/tracing/tracing_test.go @@ -20,7 +20,7 @@ import ( config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "github.com/prometheus/prometheus/config" ) @@ -105,7 +105,7 @@ func TestUninstallingTracerProvider(t *testing.T) { } require.NoError(t, m.ApplyConfig(&cfg)) - require.NotEqual(t, trace.NewNoopTracerProvider(), otel.GetTracerProvider()) + require.NotEqual(t, noop.NewTracerProvider(), otel.GetTracerProvider()) // Uninstall by passing empty config. cfg2 := config.Config{ @@ -114,7 +114,7 @@ require.NoError(t, m.ApplyConfig(&cfg2)) // Make sure we get a no-op tracer provider after uninstallation. - require.Equal(t, trace.NewNoopTracerProvider(), otel.GetTracerProvider()) + require.Equal(t, noop.NewTracerProvider(), otel.GetTracerProvider()) } func TestTracerProviderShutdown(t *testing.T) { @@ -131,5 +131,5 @@ func TestTracerProviderShutdown(t *testing.T) { // Check if we closed the done channel. _, ok := <-m.done - require.Equal(t, ok, false) + require.False(t, ok) } diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 6afef1389..d39989713 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -359,7 +359,7 @@ func (db *DB) replayWAL() error { start := time.Now() dir, startFrom, err := wlog.LastCheckpoint(db.wal.Dir()) - if err != nil && err != record.ErrNotFound { + if err != nil && !errors.Is(err, record.ErrNotFound) { return fmt.Errorf("find last checkpoint: %w", err) } @@ -962,6 +962,11 @@ func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Met return 0, nil } +func (a *appender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) { + // TODO(bwplotka): Wire CT zero samples in the Agent's appender. + return 0, nil +} + // Commit submits the collected samples and purges the batch.
func (a *appender) Commit() error { if err := a.log(); err != nil { diff --git a/tsdb/block.go b/tsdb/block.go index a586536b1..e2562de03 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -17,6 +17,7 @@ package tsdb import ( "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -26,7 +27,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -479,14 +479,19 @@ func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, ma slices.Sort(st) } } - - return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) + } + return st, nil } func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) == 0 { st, err := r.ir.LabelValues(ctx, name) - return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + if err != nil { + return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) + } + return st, nil } return labelValuesWithMatchers(ctx, r.ir, name, matchers...) @@ -503,7 +508,7 @@ func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma func (r blockIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { p, err := r.ir.Postings(ctx, name, values...) if err != nil { - return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + return p, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return p, nil } @@ -514,7 +519,7 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { func (r blockIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { if err := r.ir.Series(ref, builder, chks); err != nil { - return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) + return fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return nil } @@ -566,7 +571,7 @@ func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Mat p, err := PostingsForMatchers(ctx, pb.indexr, ms...) 
if err != nil { - return errors.Wrap(err, "select series") + return fmt.Errorf("select series: %w", err) } ir := pb.indexr @@ -654,12 +659,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er func (pb *Block) Snapshot(dir string) error { blockDir := filepath.Join(dir, pb.meta.ULID.String()) if err := os.MkdirAll(blockDir, 0o777); err != nil { - return errors.Wrap(err, "create snapshot block dir") + return fmt.Errorf("create snapshot block dir: %w", err) } chunksDir := chunkDir(blockDir) if err := os.MkdirAll(chunksDir, 0o777); err != nil { - return errors.Wrap(err, "create snapshot chunk dir") + return fmt.Errorf("create snapshot chunk dir: %w", err) } // Hardlink meta, index and tombstones @@ -669,7 +674,7 @@ func (pb *Block) Snapshot(dir string) error { tombstones.TombstonesFilename, } { if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil { - return errors.Wrapf(err, "create snapshot %s", fname) + return fmt.Errorf("create snapshot %s: %w", fname, err) } } @@ -677,13 +682,13 @@ func (pb *Block) Snapshot(dir string) error { curChunkDir := chunkDir(pb.dir) files, err := os.ReadDir(curChunkDir) if err != nil { - return errors.Wrap(err, "ReadDir the current chunk dir") + return fmt.Errorf("ReadDir the current chunk dir: %w", err) } for _, f := range files { err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name())) if err != nil { - return errors.Wrap(err, "hardlink a chunk") + return fmt.Errorf("hardlink a chunk: %w", err) } } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 46e6ecf84..7858e6b0c 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -59,14 +59,14 @@ func TestSetCompactionFailed(t *testing.T) { blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1)) b, err := OpenBlock(nil, blockDir, nil) require.NoError(t, err) - require.Equal(t, false, b.meta.Compaction.Failed) + require.False(t, b.meta.Compaction.Failed) require.NoError(t, b.setCompactionFailed()) - require.Equal(t, true, b.meta.Compaction.Failed) + require.True(t, b.meta.Compaction.Failed) require.NoError(t, b.Close()) b, err = OpenBlock(nil, blockDir, nil) require.NoError(t, err) - require.Equal(t, true, b.meta.Compaction.Failed) + require.True(t, b.meta.Compaction.Failed) require.NoError(t, b.Close()) } @@ -166,7 +166,7 @@ func TestCorruptedChunk(t *testing.T) { require.NoError(t, err) n, err := f.Write([]byte("x")) require.NoError(t, err) - require.Equal(t, n, 1) + require.Equal(t, 1, n) }, iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:cfc0526c, actual:34815eae"), }, @@ -178,7 +178,7 @@ func TestCorruptedChunk(t *testing.T) { blockDir := createBlock(t, tmpdir, []storage.Series{series}) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) - require.Greater(t, len(files), 0, "No chunk created.") + require.NotEmpty(t, files, "No chunk created.") f, err := os.OpenFile(files[0], os.O_RDWR, 0o666) require.NoError(t, err) @@ -224,7 +224,7 @@ func TestLabelValuesWithMatchers(t *testing.T) { blockDir := createBlock(t, tmpdir, seriesEntries) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) - require.Greater(t, len(files), 0, "No chunk created.") + require.NotEmpty(t, files, "No chunk created.") // Check open err. 
block, err := OpenBlock(nil, blockDir, nil) @@ -352,16 +352,14 @@ func TestReadIndexFormatV1(t *testing.T) { q, err := NewBlockQuerier(block, 0, 1000) require.NoError(t, err) - require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")), - map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}) + require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 1, f: 2}}}, query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))) q, err = NewBlockQuerier(block, 0, 1000) require.NoError(t, err) - require.Equal(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")), - map[string][]chunks.Sample{ - `{foo="bar"}`: {sample{t: 1, f: 2}}, - `{foo="baz"}`: {sample{t: 3, f: 4}}, - }) + require.Equal(t, map[string][]chunks.Sample{ + `{foo="bar"}`: {sample{t: 1, f: 2}}, + `{foo="baz"}`: {sample{t: 3, f: 4}}, + }, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$"))) } func BenchmarkLabelValuesWithMatchers(b *testing.B) { @@ -383,7 +381,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { blockDir := createBlock(b, tmpdir, seriesEntries) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(b, err) - require.Greater(b, len(files), 0, "No chunk created.") + require.NotEmpty(b, files, "No chunk created.") // Check open err. block, err := OpenBlock(nil, blockDir, nil) @@ -402,7 +400,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { for benchIdx := 0; benchIdx < b.N; benchIdx++ { actualValues, err := indexReader.LabelValues(ctx, "b_tens", matchers...) require.NoError(b, err) - require.Equal(b, 9, len(actualValues)) + require.Len(b, actualValues, 9) } } @@ -436,7 +434,7 @@ func TestLabelNamesWithMatchers(t *testing.T) { blockDir := createBlock(t, tmpdir, seriesEntries) files, err := sequenceFiles(chunkDir(blockDir)) require.NoError(t, err) - require.Greater(t, len(files), 0, "No chunk created.") + require.NotEmpty(t, files, "No chunk created.") // Check open err. 
block, err := OpenBlock(nil, blockDir, nil) @@ -658,7 +656,7 @@ func genHistogramSeries(totalSeries, labelCount int, mint, maxt, step int64, flo h.CounterResetHint = histogram.NotCounterReset } if floatHistogram { - return sample{t: ts, fh: h.ToFloat()} + return sample{t: ts, fh: h.ToFloat(nil)} } return sample{t: ts, h: h} }) @@ -694,7 +692,7 @@ func genHistogramAndFloatSeries(totalSeries, labelCount int, mint, maxt, step in h.CounterResetHint = histogram.NotCounterReset } if floatHistogram { - s = sample{t: ts, fh: h.ToFloat()} + s = sample{t: ts, fh: h.ToFloat(nil)} } else { s = sample{t: ts, h: h} } diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index 0d017e095..73bc5f1e3 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -15,13 +15,14 @@ package tsdb import ( "context" + "errors" + "fmt" "math" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/storage" @@ -65,7 +66,7 @@ func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWrite func (w *BlockWriter) initHead() error { chunkDir, err := os.MkdirTemp(os.TempDir(), "head") if err != nil { - return errors.Wrap(err, "create temp dir") + return fmt.Errorf("create temp dir: %w", err) } w.chunkDir = chunkDir opts := DefaultHeadOptions() @@ -74,7 +75,7 @@ func (w *BlockWriter) initHead() error { opts.EnableNativeHistograms.Store(true) h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats()) if err != nil { - return errors.Wrap(err, "tsdb.NewHead") + return fmt.Errorf("tsdb.NewHead: %w", err) } w.head = h @@ -102,11 +103,11 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { []int64{w.blockSize}, chunkenc.NewPool(), nil) if err != nil { - return ulid.ULID{}, errors.Wrap(err, "create leveled compactor") + return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err) } id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil) if err != nil { - return ulid.ULID{}, errors.Wrap(err, "compactor write") + return ulid.ULID{}, fmt.Errorf("compactor write: %w", err) } return id, nil diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index 3d22f74cc..9db1bf364 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -14,6 +14,7 @@ package chunkenc import ( + "errors" "fmt" "io" "math/rand" @@ -153,8 +154,8 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) { res = v i++ } - if it.Err() != io.EOF { - require.NoError(b, it.Err()) + if err := it.Err(); err != nil && !errors.Is(err, io.EOF) { + require.NoError(b, err) } _ = res } diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 33a4bbf1c..6f5a95fb1 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -94,10 +94,10 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { }, NegativeBuckets: []int64{2, 1, -1, -1}, // counts: 2, 3, 2, 1 (total 8) } - chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(), false) + chk, _, app, err := app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) - exp = append(exp, floatResult{t: ts, h: h.ToFloat()}) + exp = append(exp, floatResult{t: ts, h: h.ToFloat(nil)}) require.Equal(t, 1, c.NumSamples()) // Add an updated histogram. 
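Several hunks in this section migrate Histogram.ToFloat() calls to Histogram.ToFloat(nil). A minimal sketch of the new call shape, assuming the added parameter is an optional *histogram.FloatHistogram destination that is reused when non-nil and freshly allocated when nil (the package and function names below are illustrative, not from this diff):

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func demoToFloat(h *histogram.Histogram) {
	// Passing nil matches the old zero-argument ToFloat() behaviour:
	// a fresh FloatHistogram is allocated for the result.
	fh := h.ToFloat(nil)
	fmt.Println(fh.Count, fh.Sum)

	// Passing a previously returned FloatHistogram lets the conversion
	// reuse its memory, which is why hot paths can recycle a buffer while
	// the tests in this diff simply pass nil.
	fh = h.ToFloat(fh)
	fmt.Println(fh.Count, fh.Sum)
}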
@@ -108,10 +108,10 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { h.Sum = 24.4 h.PositiveBuckets = []int64{5, -2, 1, -2} // counts: 5, 3, 4, 2 (total 14) h.NegativeBuckets = []int64{4, -1, 1, -1} // counts: 4, 3, 4, 4 (total 15) - chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(), false) + chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) - expH := h.ToFloat() + expH := h.ToFloat(nil) expH.CounterResetHint = histogram.NotCounterReset exp = append(exp, floatResult{t: ts, h: expH}) require.Equal(t, 2, c.NumSamples()) @@ -127,10 +127,10 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) { h.Sum = 24.4 h.PositiveBuckets = []int64{6, 1, -3, 6} // counts: 6, 7, 4, 10 (total 27) h.NegativeBuckets = []int64{5, 1, -2, 3} // counts: 5, 6, 4, 7 (total 22) - chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(), false) + chk, _, _, err = app.AppendFloatHistogram(nil, ts, h.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) - expH = h.ToFloat() + expH = h.ToFloat(nil) expH.CounterResetHint = histogram.NotCounterReset exp = append(exp, floatResult{t: ts, h: expH}) require.Equal(t, 3, c.NumSamples()) @@ -217,7 +217,7 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { NegativeBuckets: []int64{1}, } - chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(), false) + chk, _, app, err := app.AppendFloatHistogram(nil, ts1, h1.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 1, c.NumSamples()) @@ -245,13 +245,13 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3) // This is how span changes will be handled. hApp, _ := app.(*FloatHistogramAppender) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat()) - require.Greater(t, len(posInterjections), 0) - require.Greater(t, len(negInterjections), 0) + posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat(nil)) + require.NotEmpty(t, posInterjections) + require.NotEmpty(t, negInterjections) require.True(t, ok) // Only new buckets came in. require.False(t, cr) c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans) - chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(), false) + chk, _, _, err = app.AppendFloatHistogram(nil, ts2, h2.ToFloat(nil), false) require.NoError(t, err) require.Nil(t, chk) require.Equal(t, 2, c.NumSamples()) @@ -263,10 +263,10 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { h1.PositiveBuckets = []int64{6, -3, -3, 3, -3, 0, 2, 2, 1, -5, 1} h1.NegativeSpans = h2.NegativeSpans h1.NegativeBuckets = []int64{0, 1} - expH2 := h2.ToFloat() + expH2 := h2.ToFloat(nil) expH2.CounterResetHint = histogram.NotCounterReset exp := []floatResult{ - {t: ts1, h: h1.ToFloat()}, + {t: ts1, h: h1.ToFloat(nil)}, {t: ts2, h: expH2}, } it := c.Iterator(nil) @@ -348,8 +348,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Greater(t, len(posInterjections), 0) - require.Equal(t, 0, len(negInterjections)) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) require.True(t, ok) // Only new buckets came in. 
require.False(t, cr) @@ -370,8 +370,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -385,8 +385,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -406,8 +406,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -433,8 +433,8 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1} posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -810,10 +810,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -834,10 +834,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{6, 3, 3, 2, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -856,10 +856,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -872,10 +872,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - 
require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -894,10 +894,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) @@ -920,10 +920,10 @@ func TestFloatHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, GaugeType) diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 3b6efb811..53aee89db 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -98,7 +98,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { chk, _, app, err := app.AppendHistogram(nil, ts, h, false) require.NoError(t, err) require.Nil(t, chk) - exp = append(exp, result{t: ts, h: h, fh: h.ToFloat()}) + exp = append(exp, result{t: ts, h: h, fh: h.ToFloat(nil)}) require.Equal(t, 1, c.NumSamples()) // Add an updated histogram. @@ -114,7 +114,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { require.Nil(t, chk) hExp := h.Copy() hExp.CounterResetHint = histogram.NotCounterReset - exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()}) + exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat(nil)}) require.Equal(t, 2, c.NumSamples()) // Add update with new appender. @@ -133,7 +133,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) { require.Nil(t, chk) hExp = h.Copy() hExp.CounterResetHint = histogram.NotCounterReset - exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat()}) + exp = append(exp, result{t: ts, h: hExp, fh: hExp.ToFloat(nil)}) require.Equal(t, 3, c.NumSamples()) // 1. Expand iterator in simple case. @@ -257,8 +257,8 @@ func TestHistogramChunkBucketChanges(t *testing.T) { // This is how span changes will be handled. hApp, _ := app.(*HistogramAppender) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Greater(t, len(posInterjections), 0) - require.Greater(t, len(negInterjections), 0) + require.NotEmpty(t, posInterjections) + require.NotEmpty(t, negInterjections) require.True(t, ok) // Only new buckets came in. 
require.False(t, cr) c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans) @@ -278,8 +278,8 @@ func TestHistogramChunkBucketChanges(t *testing.T) { hExp := h2.Copy() hExp.CounterResetHint = histogram.NotCounterReset exp := []result{ - {t: ts1, h: h1, fh: h1.ToFloat()}, - {t: ts2, h: hExp, fh: hExp.ToFloat()}, + {t: ts1, h: h1, fh: h1.ToFloat(nil)}, + {t: ts2, h: hExp, fh: hExp.ToFloat(nil)}, } it := c.Iterator(nil) var act []result @@ -365,8 +365,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Greater(t, len(posInterjections), 0) - require.Equal(t, 0, len(negInterjections)) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) require.True(t, ok) // Only new buckets came in. require.False(t, cr) @@ -386,8 +386,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -401,8 +401,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -425,8 +425,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -455,8 +455,8 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26) posInterjections, negInterjections, ok, cr := hApp.appendable(h2) - require.Equal(t, 0, len(posInterjections)) - require.Equal(t, 0, len(negInterjections)) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) @@ -987,10 +987,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1015,10 +1015,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{6, -3, 0, -1, 3, -4} // {6, 3, 3, 2, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1041,10 +1041,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // {6, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Greater(t, len(pBackwardI), 0) - require.Len(t, nI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.NotEmpty(t, pBackwardI) + require.Empty(t, nI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1061,10 +1061,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // {6, 2, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Len(t, pI, 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.Empty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1087,10 +1087,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // {7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) @@ -1117,10 +1117,10 @@ func TestHistogramChunkAppendableGauge(t *testing.T) { h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // {1, 2, 5, 3, 3, 2, 4, 5, 1} pI, nI, pBackwardI, nBackwardI, _, _, ok := hApp.appendableGauge(h2) - require.Greater(t, len(pI), 0) - require.Len(t, nI, 0) - require.Len(t, pBackwardI, 0) - require.Len(t, nBackwardI, 0) + require.NotEmpty(t, pI) + require.Empty(t, nI) + require.Empty(t, pBackwardI) + require.Empty(t, nBackwardI) require.True(t, ok) newc, recoded, _, err := hApp.AppendHistogram(nil, ts+1, h2, false) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index b495b6182..12c3e7b90 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -111,6 +111,10 @@ func (e 
*CorruptionErr) Error() string { return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error() } +func (e *CorruptionErr) Unwrap() error { + return e.Err +} + // chunkPos keeps track of the position in the head chunk files. // chunkPos is not thread-safe, a lock must be used to protect it. type chunkPos struct { @@ -400,7 +404,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro buf := make([]byte, MagicChunksSize) size, err := f.Read(buf) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err) } if err := f.Close(); err != nil { @@ -892,7 +896,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding. chkEnc = cdm.RemoveMasks(chkEnc) if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil { - if cerr, ok := err.(*CorruptionErr); ok { + var cerr *CorruptionErr + if errors.As(err, &cerr) { cerr.Dir = cdm.dir.Name() cerr.FileIndex = segID return cerr diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index 68c133088..4a4d89e81 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -128,7 +128,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { } // Checking on-disk bytes for the first file. - require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles)) + require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles)) require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers)) actualBytes, err := os.ReadFile(firstFileName) @@ -173,7 +173,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { idx++ return nil })) - require.Equal(t, len(expectedData), idx) + require.Len(t, expectedData, idx) } // TestChunkDiskMapper_Truncate tests @@ -214,7 +214,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) { for _, i := range remainingFiles { _, ok := hrw.mmappedChunkFiles[i] - require.Equal(t, true, ok) + require.True(t, ok) } } @@ -471,7 +471,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { nonEmptyFile() // 2. nonEmptyFile() // 3. - require.Equal(t, 3, len(hrw.mmappedChunkFiles)) + require.Len(t, hrw.mmappedChunkFiles, 3) lastFile := 0 for idx := range hrw.mmappedChunkFiles { if idx > lastFile { @@ -500,7 +500,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { hrw = createChunkDiskMapper(t, dir) // Removed from memory. - require.Equal(t, 3, len(hrw.mmappedChunkFiles)) + require.Len(t, hrw.mmappedChunkFiles, 3) for idx := range hrw.mmappedChunkFiles { require.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file") } @@ -508,7 +508,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) { // Removed even from disk. 
files, err := os.ReadDir(dir) require.NoError(t, err) - require.Equal(t, 3, len(files)) + require.Len(t, files, 3) for _, fi := range files { seq, err := strconv.ParseUint(fi.Name(), 10, 64) require.NoError(t, err) diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go index e678a040b..5756e4585 100644 --- a/tsdb/chunks/queue_test.go +++ b/tsdb/chunks/queue_test.go @@ -29,33 +29,33 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) { totalSize := 0 for s := q.first; s != nil; s = s.nextSegment { - require.True(t, s.segment != nil) + require.NotNil(t, s.segment) // Next read index is lower or equal than next write index (we cannot past written jobs) - require.True(t, s.nextRead <= s.nextWrite) + require.LessOrEqual(t, s.nextRead, s.nextWrite) // Number of unread elements in this segment. totalSize += s.nextWrite - s.nextRead // First segment can be partially read, other segments were not read yet. if s == q.first { - require.True(t, s.nextRead >= 0) + require.GreaterOrEqual(t, s.nextRead, 0) } else { - require.True(t, s.nextRead == 0) + require.Equal(t, 0, s.nextRead) } // If first shard is empty (everything was read from it already), it must have extra capacity for // additional elements, otherwise it would have been removed. if s == q.first && s.nextRead == s.nextWrite { - require.True(t, s.nextWrite < len(s.segment)) + require.Less(t, s.nextWrite, len(s.segment)) } // Segments in the middle are full. if s != q.first && s != q.last { - require.True(t, s.nextWrite == len(s.segment)) + require.Len(t, s.segment, s.nextWrite) } // Last segment must have at least one element, or we wouldn't have created it. - require.True(t, s.nextWrite > 0) + require.Greater(t, s.nextWrite, 0) } require.Equal(t, q.size, totalSize) @@ -307,7 +307,7 @@ func TestQueuePushPopManyGoroutines(t *testing.T) { readersWG.Wait() // Check if we have all expected values - require.Equal(t, writeGoroutines*writes, len(refs)) + require.Len(t, refs, writeGoroutines*writes) } func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) { diff --git a/tsdb/compact.go b/tsdb/compact.go index 32c88d2cc..7b4ff9c1b 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -16,6 +16,7 @@ package tsdb import ( "context" "crypto/rand" + "errors" "fmt" "io" "os" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -485,7 +485,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, if !errors.Is(err, context.Canceled) { for _, b := range bs { if err := b.setCompactionFailed(); err != nil { - errs.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir())) + errs.Add(fmt.Errorf("setting compaction failed for block: %s: %w", b.Dir(), err)) } } } @@ -586,7 +586,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl chunkw, err = chunks.NewWriterWithSegSize(chunkDir(tmp), c.maxBlockChunkSegmentSize) if err != nil { - return errors.Wrap(err, "open chunk writer") + return fmt.Errorf("open chunk writer: %w", err) } closers = append(closers, chunkw) // Record written chunk sizes on level 1 compactions. 
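// A minimal stand-alone sketch (editor's illustration, not part of the patch).
// The Unwrap method added to CorruptionErr above is what lets the errors.As
// rewrite in IterateAllChunks keep working once errors are wrapped with %w:
// errors.As walks the Unwrap chain, while the old type assertion only
// inspected the outermost value. corruptionErr below is a hypothetical
// stand-in for tsdb/chunks.CorruptionErr.
package main

import (
	"errors"
	"fmt"
)

type corruptionErr struct {
	Dir       string
	FileIndex int
	Err       error
}

func (e *corruptionErr) Error() string {
	return fmt.Errorf("corruption in head chunk file %s/%06d: %w", e.Dir, e.FileIndex, e.Err).Error()
}

// Unwrap exposes the underlying error to errors.Is / errors.As.
func (e *corruptionErr) Unwrap() error { return e.Err }

func main() {
	base := &corruptionErr{Dir: "data", FileIndex: 1, Err: errors.New("bad magic number")}
	// Callers typically wrap once more with %w.
	wrapped := fmt.Errorf("iterating chunks: %w", base)

	var cerr *corruptionErr
	fmt.Println(errors.As(wrapped, &cerr)) // true: found through the chain
	_, ok := wrapped.(*corruptionErr)
	fmt.Println(ok) // false: a plain type assertion sees only the outer wrapper
}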
@@ -601,12 +601,12 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl indexw, err := index.NewWriter(c.ctx, filepath.Join(tmp, indexFilename)) if err != nil { - return errors.Wrap(err, "open index writer") + return fmt.Errorf("open index writer: %w", err) } closers = append(closers, indexw) if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { - return errors.Wrap(err, "populate block") + return fmt.Errorf("populate block: %w", err) } select { @@ -634,17 +634,17 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } if _, err = writeMetaFile(c.logger, tmp, meta); err != nil { - return errors.Wrap(err, "write merged meta") + return fmt.Errorf("write merged meta: %w", err) } // Create an empty tombstones file. if _, err := tombstones.WriteFile(c.logger, tmp, tombstones.NewMemTombstones()); err != nil { - return errors.Wrap(err, "write new tombstones file") + return fmt.Errorf("write new tombstones file: %w", err) } df, err := fileutil.OpenDir(tmp) if err != nil { - return errors.Wrap(err, "open temporary block dir") + return fmt.Errorf("open temporary block dir: %w", err) } defer func() { if df != nil { @@ -653,18 +653,18 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl }() if err := df.Sync(); err != nil { - return errors.Wrap(err, "sync temporary dir file") + return fmt.Errorf("sync temporary dir file: %w", err) } // Close temp dir before rename block dir (for windows platform). if err = df.Close(); err != nil { - return errors.Wrap(err, "close temporary dir") + return fmt.Errorf("close temporary dir: %w", err) } df = nil // Block successfully written, make it visible in destination dir by moving it from tmp one. 
if err := fileutil.Replace(tmp, dir); err != nil { - return errors.Wrap(err, "rename block dir") + return fmt.Errorf("rename block dir: %w", err) } return nil @@ -693,7 +693,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa defer func() { errs := tsdb_errors.NewMulti(err) if cerr := tsdb_errors.CloseAll(closers); cerr != nil { - errs.Add(errors.Wrap(cerr, "close")) + errs.Add(fmt.Errorf("close: %w", cerr)) } err = errs.Err() metrics.PopulatingBlocks.Set(0) @@ -721,19 +721,19 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa indexr, err := b.Index() if err != nil { - return errors.Wrapf(err, "open index reader for block %+v", b.Meta()) + return fmt.Errorf("open index reader for block %+v: %w", b.Meta(), err) } closers = append(closers, indexr) chunkr, err := b.Chunks() if err != nil { - return errors.Wrapf(err, "open chunk reader for block %+v", b.Meta()) + return fmt.Errorf("open chunk reader for block %+v: %w", b.Meta(), err) } closers = append(closers, chunkr) tombsr, err := b.Tombstones() if err != nil { - return errors.Wrapf(err, "open tombstone reader for block %+v", b.Meta()) + return fmt.Errorf("open tombstone reader for block %+v: %w", b.Meta(), err) } closers = append(closers, tombsr) @@ -755,11 +755,11 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa for symbols.Next() { if err := indexw.AddSymbol(symbols.At()); err != nil { - return errors.Wrap(err, "add symbol") + return fmt.Errorf("add symbol: %w", err) } } - if symbols.Err() != nil { - return errors.Wrap(symbols.Err(), "next symbol") + if err := symbols.Err(); err != nil { + return fmt.Errorf("next symbol: %w", err) } var ( @@ -791,8 +791,8 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa // chunk file purposes. chks = append(chks, chksIter.At()) } - if chksIter.Err() != nil { - return errors.Wrap(chksIter.Err(), "chunk iter") + if err := chksIter.Err(); err != nil { + return fmt.Errorf("chunk iter: %w", err) } // Skip the series with all deleted chunks. 
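// A minimal stand-alone sketch (editor's illustration, not part of the patch).
// Besides swapping errors.Wrap for fmt.Errorf("...: %w", err), the hunks above
// turn `if it.Err() != nil` into `if err := it.Err(); err != nil`, so Err() is
// evaluated exactly once and the same captured value is wrapped. iter below is
// a hypothetical stand-in for the symbol/chunk iterators in PopulateBlock.
package main

import (
	"errors"
	"fmt"
)

type iter struct{ err error }

func (it *iter) Err() error { return it.err }

func drain(it *iter) error {
	// Capture once, check, then wrap the captured value.
	if err := it.Err(); err != nil {
		return fmt.Errorf("next symbol: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(drain(&iter{}))                        // <nil>
	fmt.Println(drain(&iter{err: errors.New("boom")})) // next symbol: boom
}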
@@ -801,10 +801,10 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa } if err := chunkw.WriteChunks(chks...); err != nil { - return errors.Wrap(err, "write chunks") + return fmt.Errorf("write chunks: %w", err) } if err := indexw.AddSeries(ref, s.Labels(), chks...); err != nil { - return errors.Wrap(err, "add series") + return fmt.Errorf("add series: %w", err) } meta.Stats.NumChunks += uint64(len(chks)) @@ -815,13 +815,13 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa for _, chk := range chks { if err := chunkPool.Put(chk.Chunk); err != nil { - return errors.Wrap(err, "put chunk") + return fmt.Errorf("put chunk: %w", err) } } ref++ } - if set.Err() != nil { - return errors.Wrap(set.Err(), "iterate compaction set") + if err := set.Err(); err != nil { + return fmt.Errorf("iterate compaction set: %w", err) } return nil diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 94b35e3b4..f33bb73c1 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "fmt" "math" "math/rand" @@ -27,7 +28,6 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid" - "github.com/pkg/errors" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" @@ -1222,7 +1222,7 @@ func TestDisableAutoCompactions(t *testing.T) { } require.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.") - require.Equal(t, 0, len(db.blocks)) + require.Empty(t, db.blocks) // Enable the compaction, trigger it and check that the block is persisted. db.EnableCompactions() @@ -1236,7 +1236,7 @@ func TestDisableAutoCompactions(t *testing.T) { } time.Sleep(100 * time.Millisecond) } - require.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.") + require.NotEmpty(t, db.Blocks(), "No block was persisted after the set timeout.") } // TestCancelCompactions ensures that when the db is closed @@ -1259,7 +1259,7 @@ func TestCancelCompactions(t *testing.T) { { db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) - require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch") + require.Len(t, db.Blocks(), 3, "initial block count mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") db.compactc <- struct{}{} // Trigger a compaction. for prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.PopulatingBlocks) <= 0 { @@ -1278,7 +1278,7 @@ func TestCancelCompactions(t *testing.T) { { db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000}, nil) require.NoError(t, err) - require.Equal(t, 3, len(db.Blocks()), "initial block count mismatch") + require.Len(t, db.Blocks(), 3, "initial block count mismatch") require.Equal(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "initial compaction counter mismatch") db.compactc <- struct{}{} // Trigger a compaction. @@ -1291,7 +1291,7 @@ func TestCancelCompactions(t *testing.T) { actT := time.Since(start) expT := timeCompactionUninterrupted / 2 // Closing the db in the middle of compaction should less than half the time. - require.True(t, actT < expT, "closing the db took more than expected. exp: <%v, act: %v", expT, actT) + require.Less(t, actT, expT, "closing the db took more than expected. 
exp: <%v, act: %v", expT, actT) // Make sure that no blocks were marked as compaction failed. // This checks that the `context.Canceled` error is properly checked at all levels: @@ -1402,8 +1402,8 @@ func TestHeadCompactionWithHistograms(t *testing.T) { for tsMinute := from; tsMinute <= to; tsMinute++ { var err error if floatTest { - _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat()) - efh := h.ToFloat() + _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat(nil)) + efh := h.ToFloat(nil) if tsMinute == from { efh.CounterResetHint = histogram.UnknownCounterReset } else { diff --git a/tsdb/db.go b/tsdb/db.go index 2e3801a9e..b2cc37a19 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -16,6 +16,7 @@ package tsdb import ( "context" + "errors" "fmt" "io" "io/fs" @@ -30,7 +31,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -70,20 +70,19 @@ var ErrNotReady = errors.New("TSDB not ready") // millisecond precision timestamps. func DefaultOptions() *Options { return &Options{ - WALSegmentSize: wlog.DefaultSegmentSize, - MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, - RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), - MinBlockDuration: DefaultBlockDuration, - MaxBlockDuration: DefaultBlockDuration, - NoLockfile: false, - AllowOverlappingCompaction: true, - SamplesPerChunk: DefaultSamplesPerChunk, - WALCompression: wlog.CompressionNone, - StripeSize: DefaultStripeSize, - HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, - IsolationDisabled: defaultIsolationDisabled, - HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, - OutOfOrderCapMax: DefaultOutOfOrderCapMax, + WALSegmentSize: wlog.DefaultSegmentSize, + MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, + RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), + MinBlockDuration: DefaultBlockDuration, + MaxBlockDuration: DefaultBlockDuration, + NoLockfile: false, + SamplesPerChunk: DefaultSamplesPerChunk, + WALCompression: wlog.CompressionNone, + StripeSize: DefaultStripeSize, + HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, + IsolationDisabled: defaultIsolationDisabled, + HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, + OutOfOrderCapMax: DefaultOutOfOrderCapMax, } } @@ -115,14 +114,6 @@ type Options struct { // NoLockfile disables creation and consideration of a lock file. NoLockfile bool - // Compaction of overlapping blocks are allowed if AllowOverlappingCompaction is true. - // This is an optional flag for overlapping blocks. - // The reason why this flag exists is because there are various users of the TSDB - // that do not want vertical compaction happening on ingest time. Instead, - // they'd rather keep overlapping blocks and let another component do the overlapping compaction later. - // For Prometheus, this will always be true. - AllowOverlappingCompaction bool - // WALCompression configures the compression type to use on records in the WAL. WALCompression wlog.CompressionType @@ -386,7 +377,7 @@ type DBReadOnly struct { // OpenDBReadOnly opens DB in the given directory for read only operations. 
func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { - return nil, errors.Wrap(err, "opening the db dir") + return nil, fmt.Errorf("opening the db dir: %w", err) } if l == nil { @@ -407,7 +398,7 @@ func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { blockReaders, err := db.Blocks() if err != nil { - return errors.Wrap(err, "read blocks") + return fmt.Errorf("read blocks: %w", err) } maxBlockTime := int64(math.MinInt64) if len(blockReaders) > 0 { @@ -432,15 +423,16 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { return err } defer func() { - returnErr = tsdb_errors.NewMulti( - returnErr, - errors.Wrap(head.Close(), "closing Head"), - ).Err() + errs := tsdb_errors.NewMulti(returnErr) + if err := head.Close(); err != nil { + errs.Add(fmt.Errorf("closing Head: %w", err)) + } + returnErr = errs.Err() }() // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. if err := head.Init(maxBlockTime); err != nil { - return errors.Wrap(err, "read WAL") + return fmt.Errorf("read WAL: %w", err) } mint := head.MinTime() maxt := head.MaxTime() @@ -454,12 +446,15 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { nil, ) if err != nil { - return errors.Wrap(err, "create leveled compactor") + return fmt.Errorf("create leveled compactor: %w", err) } // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. _, err = compactor.Write(dir, rh, mint, maxt+1, nil) - return errors.Wrap(err, "writing WAL") + if err != nil { + return fmt.Errorf("writing WAL: %w", err) + } + return nil } func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) { @@ -518,7 +513,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. if err := head.Init(maxBlockTime); err != nil { - return nil, errors.Wrap(err, "read WAL") + return nil, fmt.Errorf("read WAL: %w", err) } // Set the wal to nil to disable all wal operations. // This is mainly to avoid blocking when closing the head. @@ -580,7 +575,9 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { } errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + if err != nil { + errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err)) + } } return nil, errs.Err() } @@ -761,7 +758,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs // Fixup bad format written by Prometheus 2.1. if err := repairBadIndexVersion(l, dir); err != nil { - return nil, errors.Wrap(err, "repair bad index version") + return nil, fmt.Errorf("repair bad index version: %w", err) } walDir := filepath.Join(dir, "wal") @@ -769,12 +766,12 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs // Migrate old WAL if one exists. if err := MigrateWAL(l, walDir); err != nil { - return nil, errors.Wrap(err, "migrate WAL") + return nil, fmt.Errorf("migrate WAL: %w", err) } for _, tmpDir := range []string{walDir, dir} { // Remove tmp dirs. 
if err := removeBestEffortTmpDirs(l, tmpDir); err != nil { - return nil, errors.Wrap(err, "remove tmp dirs") + return nil, fmt.Errorf("remove tmp dirs: %w", err) } } @@ -797,11 +794,11 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } close(db.donec) // DB is never run if it was an error, so close this channel here. - - returnedErr = tsdb_errors.NewMulti( - returnedErr, - errors.Wrap(db.Close(), "close DB after failed startup"), - ).Err() + errs := tsdb_errors.NewMulti(returnedErr) + if err := db.Close(); err != nil { + errs.Add(fmt.Errorf("close DB after failed startup: %w", err)) + } + returnedErr = errs.Err() }() if db.blocksToDelete == nil { @@ -823,7 +820,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil) if err != nil { cancel() - return nil, errors.Wrap(err, "create leveled compactor") + return nil, fmt.Errorf("create leveled compactor: %w", err) } db.compactCancel = cancel @@ -905,17 +902,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if initErr := db.head.Init(minValidTime); initErr != nil { db.head.metrics.walCorruptionsTotal.Inc() - e, ok := initErr.(*errLoadWbl) - if ok { + var e *errLoadWbl + if errors.As(initErr, &e) { level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { - return nil, errors.Wrap(err, "repair corrupted WBL") + return nil, fmt.Errorf("repair corrupted WBL: %w", err) } level.Info(db.logger).Log("msg", "Successfully repaired WBL") } else { level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") + return nil, fmt.Errorf("repair corrupted WAL: %w", err) } level.Info(db.logger).Log("msg", "Successfully repaired WAL") } @@ -1131,10 +1128,11 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { lastBlockMaxt := int64(math.MinInt64) defer func() { - returnErr = tsdb_errors.NewMulti( - returnErr, - errors.Wrap(db.head.truncateWAL(lastBlockMaxt), "WAL truncation in Compact defer"), - ).Err() + errs := tsdb_errors.NewMulti(returnErr) + if err := db.head.truncateWAL(lastBlockMaxt); err != nil { + errs.Add(fmt.Errorf("WAL truncation in Compact defer: %w", err)) + } + returnErr = errs.Err() }() start := time.Now() @@ -1168,7 +1166,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { db.head.WaitForAppendersOverlapping(rh.MaxTime()) if err := db.compactHead(rh); err != nil { - return errors.Wrap(err, "compact head") + return fmt.Errorf("compact head: %w", err) } // Consider only successful compactions for WAL truncation. lastBlockMaxt = maxt @@ -1177,7 +1175,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { // Clear some disk space before compacting blocks, especially important // when Head compaction happened over a long time range. if err := db.head.truncateWAL(lastBlockMaxt); err != nil { - return errors.Wrap(err, "WAL truncation in Compact") + return fmt.Errorf("WAL truncation in Compact: %w", err) } compactionDuration := time.Since(start) @@ -1192,7 +1190,7 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { if lastBlockMaxt != math.MinInt64 { // The head was compacted, so we compact OOO head as well. 
if err := db.compactOOOHead(ctx); err != nil { - return errors.Wrap(err, "compact ooo head") + return fmt.Errorf("compact ooo head: %w", err) } } @@ -1205,11 +1203,11 @@ func (db *DB) CompactHead(head *RangeHead) error { defer db.cmtx.Unlock() if err := db.compactHead(head); err != nil { - return errors.Wrap(err, "compact head") + return fmt.Errorf("compact head: %w", err) } if err := db.head.truncateWAL(head.BlockMaxTime()); err != nil { - return errors.Wrap(err, "WAL truncation") + return fmt.Errorf("WAL truncation: %w", err) } return nil } @@ -1228,12 +1226,12 @@ func (db *DB) compactOOOHead(ctx context.Context) error { } oooHead, err := NewOOOCompactionHead(ctx, db.head) if err != nil { - return errors.Wrap(err, "get ooo compaction head") + return fmt.Errorf("get ooo compaction head: %w", err) } ulids, err := db.compactOOO(db.dir, oooHead) if err != nil { - return errors.Wrap(err, "compact ooo head") + return fmt.Errorf("compact ooo head: %w", err) } if err := db.reloadBlocks(); err != nil { errs := tsdb_errors.NewMulti(err) @@ -1242,7 +1240,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error { errs.Add(errRemoveAll) } } - return errors.Wrap(errs.Err(), "reloadBlocks blocks after failed compact ooo head") + return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errs.Err()) } lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef() @@ -1262,7 +1260,7 @@ func (db *DB) compactOOOHead(ctx context.Context) error { } if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil { - return errors.Wrap(err, "truncate ooo wbl") + return fmt.Errorf("truncate ooo wbl: %w", err) } } @@ -1298,12 +1296,12 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID blockDir := filepath.Join(dest, uid.String()) meta, _, err := readMetaFile(blockDir) if err != nil { - return ulids, errors.Wrap(err, "read meta") + return ulids, fmt.Errorf("read meta: %w", err) } meta.Compaction.SetOutOfOrder() _, err = writeMetaFile(db.logger, blockDir, meta) if err != nil { - return ulids, errors.Wrap(err, "write meta") + return ulids, fmt.Errorf("write meta: %w", err) } } } @@ -1329,20 +1327,20 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID func (db *DB) compactHead(head *RangeHead) error { uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { - return errors.Wrap(err, "persist head block") + return fmt.Errorf("persist head block: %w", err) } if err := db.reloadBlocks(); err != nil { if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { return tsdb_errors.NewMulti( - errors.Wrap(err, "reloadBlocks blocks"), - errors.Wrapf(errRemoveAll, "delete persisted head block after failed db reloadBlocks:%s", uid), + fmt.Errorf("reloadBlocks blocks: %w", err), + fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll), ).Err() } - return errors.Wrap(err, "reloadBlocks blocks") + return fmt.Errorf("reloadBlocks blocks: %w", err) } if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { - return errors.Wrap(err, "head memory truncate") + return fmt.Errorf("head memory truncate: %w", err) } return nil } @@ -1354,7 +1352,7 @@ func (db *DB) compactBlocks() (err error) { for { plan, err := db.compactor.Plan(db.dir) if err != nil { - return errors.Wrap(err, "plan compaction") + return fmt.Errorf("plan compaction: %w", err) } if len(plan) == 0 { break @@ -1368,14 +1366,14 @@ func (db *DB) 
compactBlocks() (err error) { uid, err := db.compactor.Compact(db.dir, plan, db.blocks) if err != nil { - return errors.Wrapf(err, "compact %s", plan) + return fmt.Errorf("compact %s: %w", plan, err) } if err := db.reloadBlocks(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { - return errors.Wrapf(err, "delete compacted block after failed db reloadBlocks:%s", uid) + return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err) } - return errors.Wrap(err, "reloadBlocks blocks") + return fmt.Errorf("reloadBlocks blocks: %w", err) } } @@ -1396,14 +1394,14 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) { // reload reloads blocks and truncates the head and its WAL. func (db *DB) reload() error { if err := db.reloadBlocks(); err != nil { - return errors.Wrap(err, "reloadBlocks") + return fmt.Errorf("reloadBlocks: %w", err) } maxt, ok := db.inOrderBlocksMaxTime() if !ok { return nil } if err := db.head.Truncate(maxt); err != nil { - return errors.Wrap(err, "head truncate") + return fmt.Errorf("head truncate: %w", err) } return nil } @@ -1457,7 +1455,9 @@ func (db *DB) reloadBlocks() (err error) { } errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + if err != nil { + errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err)) + } } return errs.Err() } @@ -1509,7 +1509,7 @@ func (db *DB) reloadBlocks() (err error) { } } if err := db.deleteBlocks(deletable); err != nil { - return errors.Wrapf(err, "delete %v blocks", len(deletable)) + return fmt.Errorf("delete %v blocks: %w", len(deletable), err) } return nil } @@ -1517,7 +1517,7 @@ func (db *DB) reloadBlocks() (err error) { func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { - return nil, nil, errors.Wrap(err, "find blocks") + return nil, nil, fmt.Errorf("find blocks: %w", err) } corrupted = make(map[ulid.ULID]error) @@ -1651,16 +1651,16 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { // Noop. continue case err != nil: - return errors.Wrapf(err, "stat dir %v", toDelete) + return fmt.Errorf("stat dir %v: %w", toDelete, err) } // Replace atomically to avoid partial block when process would crash during deletion. tmpToDelete := filepath.Join(db.dir, fmt.Sprintf("%s%s", ulid, tmpForDeletionBlockDirSuffix)) if err := fileutil.Replace(toDelete, tmpToDelete); err != nil { - return errors.Wrapf(err, "replace of obsolete block for deletion %s", ulid) + return fmt.Errorf("replace of obsolete block for deletion %s: %w", ulid, err) } if err := os.RemoveAll(tmpToDelete); err != nil { - return errors.Wrapf(err, "delete obsolete block %s", ulid) + return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) } @@ -1868,7 +1868,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { - return errors.Wrapf(err, "error snapshotting block: %s", b.Dir()) + return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) } } if !withHead { @@ -1881,7 +1881,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). 
// Because of this block intervals are always +1 than the total samples it includes. if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil { - return errors.Wrap(err, "snapshot head block") + return fmt.Errorf("snapshot head block: %w", err) } return nil } @@ -1916,7 +1916,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { var err error inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block querier for head %s", rh) + return nil, fmt.Errorf("open block querier for head %s: %w", rh, err) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -1925,7 +1925,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { if err := inOrderHeadQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head block querier %s", rh) + return nil, fmt.Errorf("closing head block querier %s: %w", rh, err) } inOrderHeadQuerier = nil } @@ -1933,7 +1933,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { rh := NewRangeHead(db.head, newMint, maxt) inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) + return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } } @@ -1950,7 +1950,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. rh.isoState.Close() - return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) + return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) } blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) @@ -1959,7 +1959,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for block %s", b) + return nil, fmt.Errorf("open querier for block %s: %w", b, err) } blockQueriers = append(blockQueriers, q) } @@ -1997,7 +1997,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewRangeHead(db.head, mint, maxt) inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head %s", rh) + return nil, fmt.Errorf("open querier for head %s: %w", rh, err) } // Getting the querier above registers itself in the queue that the truncation waits on. 
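// A minimal stand-alone sketch (editor's illustration, not part of the patch).
// The rewritten defers earlier in this file (FlushWAL, open, Compact) add a
// wrapped error to the multi-error only when the deferred call actually
// failed, because fmt.Errorf, unlike the old errors.Wrap, never returns nil.
// Prometheus uses its internal tsdb_errors.NewMulti helper; the same shape
// with stdlib errors.Join (Go 1.20+) looks like this:
package main

import (
	"errors"
	"fmt"
)

func closeHead() error { return errors.New("flush mmap: disk full") }

func flushWAL() (returnErr error) {
	defer func() {
		// Wrapping unconditionally would turn a clean close into an error,
		// since fmt.Errorf always returns a non-nil value.
		if err := closeHead(); err != nil {
			returnErr = errors.Join(returnErr, fmt.Errorf("closing Head: %w", err))
		}
	}()
	return nil // the happy path still fails overall if Close fails
}

func main() {
	fmt.Println(flushWAL()) // closing Head: flush mmap: disk full
}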
@@ -2006,7 +2006,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { if err := inOrderHeadQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head querier %s", rh) + return nil, fmt.Errorf("closing head querier %s: %w", rh, err) } inOrderHeadQuerier = nil } @@ -2014,7 +2014,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewRangeHead(db.head, newMint, maxt) inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) + return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } } @@ -2027,7 +2027,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) + return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) } blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) @@ -2036,7 +2036,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for block %s", b) + return nil, fmt.Errorf("open querier for block %s: %w", b, err) } blockQueriers = append(blockQueriers, q) } @@ -2105,7 +2105,7 @@ func (db *DB) CleanTombstones() (err error) { for _, pb := range db.Blocks() { uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) if cleanErr != nil { - return errors.Wrapf(cleanErr, "clean tombstones: %s", pb.Dir()) + return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr) } if !safeToDelete { // There was nothing to clean. 
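// A minimal stand-alone sketch (editor's illustration, not part of the patch).
// The guard-then-return-nil shape used in this file (compactor.Write in
// FlushWAL, the reload in CleanTombstones) exists because pkg/errors.Wrap(nil,
// "msg") returned nil, whereas fmt.Errorf("msg: %w", err) returns a non-nil
// error even when err is nil. A bare one-line conversion would therefore
// silently break the success path:
package main

import "fmt"

func reloadBlocks() error { return nil }

// wrongTail is the naive one-line conversion of `return errors.Wrap(err, ...)`.
func wrongTail() error {
	err := reloadBlocks()
	return fmt.Errorf("reload blocks: %w", err) // non-nil even on success!
}

// rightTail is the shape the patch uses instead.
func rightTail() error {
	if err := reloadBlocks(); err != nil {
		return fmt.Errorf("reload blocks: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(wrongTail() == nil) // false: "reload blocks: %!w(<nil>)"
	fmt.Println(rightTail() == nil) // true
}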
@@ -2133,7 +2133,10 @@ func (db *DB) CleanTombstones() (err error) { level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } - return errors.Wrap(err, "reload blocks") + if err != nil { + return fmt.Errorf("reload blocks: %w", err) + } + return nil } } return nil diff --git a/tsdb/db_test.go b/tsdb/db_test.go index f602f5ee9..3bc094a3d 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -34,7 +34,6 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" @@ -127,7 +126,7 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str result[name] = samples } require.NoError(t, ss.Err()) - require.Equal(t, 0, len(ss.Warnings())) + require.Empty(t, ss.Warnings()) return result } @@ -177,7 +176,7 @@ func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Match result[name] = chks } require.NoError(t, ss.Err()) - require.Equal(t, 0, len(ss.Warnings())) + require.Empty(t, ss.Warnings()) return result } @@ -200,7 +199,7 @@ func TestDB_reloadOrder(t *testing.T) { require.NoError(t, db.reloadBlocks()) blocks := db.Blocks() - require.Equal(t, 3, len(blocks)) + require.Len(t, blocks, 3) require.Equal(t, metas[1].MinTime, blocks[0].Meta().MinTime) require.Equal(t, metas[1].MaxTime, blocks[0].Meta().MaxTime) require.Equal(t, metas[0].MinTime, blocks[1].Meta().MinTime) @@ -355,7 +354,7 @@ func TestDBAppenderAddRef(t *testing.T) { // Missing labels & invalid refs should fail. _, err = app2.Append(9999999, labels.EmptyLabels(), 1, 1) - require.Equal(t, ErrInvalidSample, errors.Cause(err)) + require.ErrorIs(t, err, ErrInvalidSample) require.NoError(t, app2.Commit()) @@ -471,7 +470,7 @@ Outer: require.Equal(t, eok, rok) if !eok { - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) continue Outer } sexp := expss.At() @@ -519,7 +518,7 @@ func TestAmendHistogramDatapointCausesError(t *testing.T) { }, PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, } - fh := h.ToFloat() + fh := h.ToFloat(nil) app = db.Appender(ctx) _, err = app.AppendHistogram(0, labels.FromStrings("a", "c"), 0, h.Copy(), nil) @@ -679,7 +678,7 @@ func TestDB_Snapshot(t *testing.T) { require.NoError(t, series.Err()) } require.NoError(t, seriesSet.Err()) - require.Equal(t, 0, len(seriesSet.Warnings())) + require.Empty(t, seriesSet.Warnings()) require.Equal(t, 1000.0, sum) } @@ -728,7 +727,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) { require.NoError(t, series.Err()) } require.NoError(t, seriesSet.Err()) - require.Equal(t, 0, len(seriesSet.Warnings())) + require.Empty(t, seriesSet.Warnings()) // Since we snapshotted with MaxTime - 10, so expect 10 less samples. 
require.Equal(t, 1000.0-10, sum)
@@ -804,7 +803,7 @@ Outer:
require.Equal(t, eok, rok)
if !eok {
- require.Equal(t, 0, len(res.Warnings()))
+ require.Empty(t, res.Warnings())
continue Outer
}
sexp := expss.At()
@@ -972,7 +971,7 @@ func TestDB_e2e(t *testing.T) {
}
require.NoError(t, ss.Err())
- require.Equal(t, 0, len(ss.Warnings()))
+ require.Empty(t, ss.Warnings())
require.Equal(t, expected, result)
q.Close()
@@ -1004,7 +1003,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
values, ws, err := q.LabelValues(ctx, "labelname")
require.NoError(t, err)
- require.Equal(t, 0, len(ws))
+ require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
}
@@ -1041,7 +1040,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
files = append(files, fi)
}
}
- require.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
+ require.GreaterOrEqual(t, len(files), 2, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
@@ -1254,7 +1253,7 @@ func TestTombstoneClean(t *testing.T) {
require.Equal(t, errExp, errRes)
require.Equal(t, smplExp, smplRes)
}
- require.Equal(t, 0, len(res.Warnings()))
+ require.Empty(t, res.Warnings())
for _, b := range db.Blocks() {
require.Equal(t, tombstones.NewMemTombstones(), b.tombstones)
@@ -1302,7 +1301,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
// After cleaning tombstones that covers the entire block, no blocks should be left behind.
actualBlockDirs, err := blockDirs(db.dir)
require.NoError(t, err)
- require.Equal(t, 0, len(actualBlockDirs))
+ require.Empty(t, actualBlockDirs)
}
// TestTombstoneCleanFail tests that a failing TombstoneClean doesn't leave any blocks behind. @@ -1348,7 +1347,7 @@ func TestTombstoneCleanFail(t *testing.T) { require.NoError(t, err) // Only one block should have been replaced by a new block. 
require.Equal(t, len(oldBlockDirs), len(actualBlockDirs)) - require.Equal(t, len(intersection(oldBlockDirs, actualBlockDirs)), len(actualBlockDirs)-1) + require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1) } // TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation @@ -1640,9 +1639,9 @@ func TestSizeRetention(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, actRetentionCount, "metric retention count mismatch") - require.Equal(t, actSize, expSize, "metric db size doesn't match actual disk size") + require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size") require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit) - require.Equal(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1) + require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1) require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block") require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block") } @@ -1666,7 +1665,7 @@ func TestSizeRetentionMetric(t *testing.T) { }() actMaxBytes := int64(prom_testutil.ToFloat64(db.metrics.maxBytes)) - require.Equal(t, actMaxBytes, c.expMaxBytes, "metric retention limit bytes mismatch") + require.Equal(t, c.expMaxBytes, actMaxBytes, "metric retention limit bytes mismatch") } } @@ -1739,7 +1738,7 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) { ss := q.Select(ctx, false, nil, c.selector...) lres, _, ws, err := expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, c.series, lres) } } @@ -1772,7 +1771,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) { metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)} } - require.Equal(t, 0, len(OverlappingBlocks(metas)), "we found unexpected overlaps") + require.Empty(t, OverlappingBlocks(metas), "we found unexpected overlaps") // Add overlapping blocks. We've to establish order again since we aren't interested // in trivial overlaps caused by unorderedness. 
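// Editor's summary (not part of the patch). The assertion rewrites in these
// test hunks are the mechanical equivalences enforced by testifylint:
//
//	require.Equal(t, 3, len(x))   =>  require.Len(t, x, 3)
//	require.Equal(t, 0, len(x))   =>  require.Empty(t, x)
//	require.Greater(t, len(x), 0) =>  require.NotEmpty(t, x)
//	require.Equal(t, true, ok)    =>  require.True(t, ok)
//	require.True(t, a < b)        =>  require.Less(t, a, b)
//
// Each pair asserts the same condition; the preferred form fails with a
// message that prints the value and its actual length or operands instead of
// a bare boolean mismatch.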
@@ -2071,7 +2070,7 @@ func TestNoEmptyBlocks(t *testing.T) { actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) require.Equal(t, len(db.Blocks()), len(actBlocks)) - require.Equal(t, 0, len(actBlocks)) + require.Empty(t, actBlocks) require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here") }) @@ -2091,7 +2090,7 @@ func TestNoEmptyBlocks(t *testing.T) { actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) require.Equal(t, len(db.Blocks()), len(actBlocks)) - require.Equal(t, 0, len(actBlocks)) + require.Empty(t, actBlocks) app = db.Appender(ctx) _, err = app.Append(0, defaultLabel, 1, 0) @@ -2112,7 +2111,7 @@ func TestNoEmptyBlocks(t *testing.T) { actBlocks, err = blockDirs(db.Dir()) require.NoError(t, err) require.Equal(t, len(db.Blocks()), len(actBlocks)) - require.Equal(t, 1, len(actBlocks), "No blocks created when compacting with >0 samples") + require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples") }) t.Run(`When no new block is created from head, and there are some blocks on disk @@ -2144,8 +2143,8 @@ func TestNoEmptyBlocks(t *testing.T) { } oldBlocks := db.Blocks() - require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks. - require.Equal(t, len(blocks)+len(oldBlocks), len(db.Blocks())) // Ensure all blocks are registered. + require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks. + require.Len(t, db.Blocks(), len(blocks)+len(oldBlocks)) // Ensure all blocks are registered. require.NoError(t, db.Delete(ctx, math.MinInt64, math.MaxInt64, defaultMatcher)) require.NoError(t, db.Compact(ctx)) require.Equal(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here once for each block that have tombstones") @@ -2153,7 +2152,7 @@ func TestNoEmptyBlocks(t *testing.T) { actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) require.Equal(t, len(db.Blocks()), len(actBlocks)) - require.Equal(t, 1, len(actBlocks), "All samples are deleted. Only the most recent block should remain after compaction.") + require.Len(t, actBlocks, 1, "All samples are deleted. 
Only the most recent block should remain after compaction.") }) } @@ -2254,7 +2253,7 @@ func TestDB_LabelNames(t *testing.T) { var ws annotations.Annotations labelNames, ws, err = q.LabelNames(ctx) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.NoError(t, q.Close()) require.Equal(t, tst.exp2, labelNames) } @@ -2283,7 +2282,7 @@ func TestCorrectNumTombstones(t *testing.T) { err := db.Compact(ctx) require.NoError(t, err) - require.Equal(t, 1, len(db.blocks)) + require.Len(t, db.blocks, 1) require.NoError(t, db.Delete(ctx, 0, 1, defaultMatcher)) require.Equal(t, uint64(1), db.blocks[0].meta.Stats.NumTombstones) @@ -2346,7 +2345,7 @@ func TestBlockRanges(t *testing.T) { } time.Sleep(100 * time.Millisecond) } - require.Equal(t, 2, len(db.Blocks()), "no new block created after the set timeout") + require.Len(t, db.Blocks(), 2, "no new block created after the set timeout") if db.Blocks()[0].Meta().MaxTime > db.Blocks()[1].Meta().MinTime { t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta()) @@ -2374,7 +2373,7 @@ func TestBlockRanges(t *testing.T) { require.NoError(t, err) defer db.Close() - require.Equal(t, 3, len(db.Blocks()), "db doesn't include expected number of blocks") + require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks") require.Equal(t, db.Blocks()[2].Meta().MaxTime, thirdBlockMaxt, "unexpected maxt of the last block") app = db.Appender(ctx) @@ -2388,7 +2387,7 @@ func TestBlockRanges(t *testing.T) { time.Sleep(100 * time.Millisecond) } - require.Equal(t, 4, len(db.Blocks()), "no new block created after the set timeout") + require.Len(t, db.Blocks(), 4, "no new block created after the set timeout") if db.Blocks()[2].Meta().MaxTime > db.Blocks()[3].Meta().MinTime { t.Fatalf("new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta()) @@ -2569,7 +2568,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) { defer func() { require.NoError(t, db.Close()) }() blocks, err := db.Blocks() require.NoError(t, err) - require.Equal(t, len(blocks), 1) + require.Len(t, blocks, 1) querier, err := db.Querier(0, int64(maxt)-1) require.NoError(t, err) @@ -2589,7 +2588,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) { require.NoError(t, series.Err()) } require.NoError(t, seriesSet.Err()) - require.Equal(t, 0, len(seriesSet.Warnings())) + require.Empty(t, seriesSet.Warnings()) require.Equal(t, 1000.0, sum) } @@ -2648,7 +2647,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) { ss := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err := expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) values := map[float64]struct{}{} for _, series := range seriesSet { @@ -2692,13 +2691,13 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) { ss := querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err := expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, map[string][]sample{}, seriesSet) ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err = expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, map[string][]sample{}, seriesSet) // This commit is after the queriers are created, so should not be returned. 
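// A minimal stand-alone sketch (editor's illustration, not part of the patch).
// Earlier in this file, `require.Equal(t, ErrInvalidSample, errors.Cause(err))`
// became `require.ErrorIs(t, err, ErrInvalidSample)`. pkg/errors' Cause walks
// its own Cause() chain to the root, whereas errors.Is matches the sentinel
// anywhere in the chain, including through fmt.Errorf's %w. errInvalidSample
// below is a hypothetical stand-in for the tsdb sentinel:
package main

import (
	"errors"
	"fmt"
)

var errInvalidSample = errors.New("invalid sample")

func appendSample() error {
	return fmt.Errorf("series ref 9999999: %w", errInvalidSample)
}

func main() {
	err := appendSample()
	// This is the condition require.ErrorIs asserts:
	fmt.Println(errors.Is(err, errInvalidSample)) // true
}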
@@ -2709,14 +2708,14 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) { ss = querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err = expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, map[string][]sample{}, seriesSet) // Series exists but has no samples for querier created after Add. ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err = expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, map[string][]sample{`{foo="bar"}`: {}}, seriesSet) querierAfterCommit, err := db.Querier(0, 1000000) @@ -2727,7 +2726,7 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) { ss = querierAfterCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err = expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet) } @@ -2874,7 +2873,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) { files, err := os.ReadDir(tempDir) require.NoError(t, err) - require.Equal(t, test.expSegmentsCount, len(files), "expected segments count mismatch") + require.Len(t, files, test.expSegmentsCount, "expected segments count mismatch") // Verify that all data is written to the segments. sizeExp := 0 @@ -3024,7 +3023,7 @@ func TestCompactHead(t *testing.T) { require.NoError(t, deleteNonBlocks(db.Dir())) db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) require.NoError(t, err) - require.Equal(t, 1, len(db.Blocks())) + require.Len(t, db.Blocks(), 1) require.Equal(t, int64(maxt), db.Head().MinTime()) defer func() { require.NoError(t, db.Close()) }() querier, err := db.Querier(0, int64(maxt)-1) @@ -3186,7 +3185,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { } loaded++ } - require.Equal(t, len(expectedLoadedDirs), loaded) + require.Len(t, expectedLoadedDirs, loaded) require.NoError(t, db.Close()) files, err := os.ReadDir(tmpDir) @@ -3201,7 +3200,7 @@ func TestOpen_VariousBlockStates(t *testing.T) { ignored++ } } - require.Equal(t, len(expectedIgnoredDirs), ignored) + require.Len(t, expectedIgnoredDirs, ignored) _, err = os.Stat(tmpCheckpointDir) require.True(t, os.IsNotExist(err)) _, err = os.Stat(tmpChunkSnapshotDir) @@ -3254,7 +3253,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal)) // As the data spans for 59 blocks, 58 go to disk and 1 remains in Head. - require.Equal(t, 58, len(db.Blocks())) + require.Len(t, db.Blocks(), 58) // Though WAL was truncated only once, head should be truncated after each compaction. require.Equal(t, 58.0, prom_testutil.ToFloat64(db.head.metrics.headTruncateTotal)) @@ -3287,7 +3286,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { db.DisableCompactions() // 1 block more. - require.Equal(t, 59, len(db.Blocks())) + require.Len(t, db.Blocks(), 59) // No series in Head because of this new block. require.Equal(t, 0, int(db.head.NumSeries())) @@ -3312,7 +3311,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal)) // No new blocks should be created as there was not data in between the new samples and the blocks. 
- require.Equal(t, 59, len(db.Blocks())) + require.Len(t, db.Blocks(), 59) // The compaction should have only truncated first 2/3 of WAL (while also rotating the files). first, last, err = wlog.Segments(db.head.wal.Dir()) @@ -3449,7 +3448,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t iterators = append(iterators, it) } require.NoError(t, seriesSet.Err()) - require.Equal(t, actualSeries, numSeries) + require.Equal(t, numSeries, actualSeries) // Compact the TSDB head again. require.NoError(t, db.Compact(ctx)) @@ -3583,7 +3582,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun } } require.NoError(t, seriesSet.Err()) - require.Equal(t, actualSeries, numSeries) + require.Equal(t, numSeries, actualSeries) // Compact the TSDB head again. require.NoError(t, db.Compact(ctx)) @@ -4106,9 +4105,9 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) { numSamples := int(170*time.Minute/time.Millisecond) / int(itvl) addSamples(numSamples) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.Compact(ctx)) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) // Restarting. require.NoError(t, db.Close()) @@ -4121,7 +4120,7 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) { numSamples = int(20*time.Minute/time.Millisecond) / int(itvl) addSamples(numSamples) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.Compact(ctx)) require.Len(t, db.Blocks(), 1) @@ -4467,13 +4466,13 @@ func TestOOOCompaction(t *testing.T) { require.NoError(t, err) require.False(t, created) require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0) - require.Equal(t, 14, len(ms.ooo.oooMmappedChunks)) // 7 original, 7 duplicate. + require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate. } checkNonEmptyOOOChunk(series1) checkNonEmptyOOOChunk(series2) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // There is a 0th WBL file. require.NoError(t, db.head.wbl.Sync()) // syncing to make sure wbl is flushed in windows @@ -4489,7 +4488,7 @@ func TestOOOCompaction(t *testing.T) { require.NoError(t, db.CompactOOOHead(ctx)) // 3 blocks exist now. [0, 120), [120, 240), [240, 360) - require.Equal(t, len(db.Blocks()), 3) + require.Len(t, db.Blocks(), 3) verifyDBSamples() // Blocks created out of OOO head now. @@ -4541,7 +4540,7 @@ func TestOOOCompaction(t *testing.T) { // Since this is a forced compaction, this block is not aligned with 2h. err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds())) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), 4) // [0, 120), [120, 240), [240, 360), [250, 351) + require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351) verifySamples(db.Blocks()[3], 250, 350) verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged. @@ -4556,7 +4555,7 @@ func TestOOOCompaction(t *testing.T) { // This will merge overlapping block. require.NoError(t, db.Compact(ctx)) - require.Equal(t, len(db.Blocks()), 3) // [0, 120), [120, 240), [240, 360) + require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360) verifySamples(db.Blocks()[0], 90, 119) verifySamples(db.Blocks()[1], 120, 239) verifySamples(db.Blocks()[2], 240, 350) // Merged block. 
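// Editor's note (not part of the patch). Several hunks above only swap
// argument order, e.g. require.Equal(t, actSize, expSize) becoming
// require.Equal(t, expSize, actSize): this is testifylint's expected-actual
// rule. The check passes and fails identically either way, but testify labels
// the first argument "expected" and the second "actual" in its diff output,
// so a reversed call produces a misleading failure message:
//
//	require.Equal(t, numSeries, actualSeries) // failure prints expected=numSeries, actual=actualSeries
//	require.Equal(t, actualSeries, numSeries) // same check, labels swapped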
@@ -4612,19 +4611,19 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) { // If the normal Head is not compacted, the OOO head compaction does not take place. require.NoError(t, db.Compact(ctx)) - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Add more in-order samples in future that would trigger the compaction. addSamples(400, 450) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Compacts normal and OOO head. require.NoError(t, db.Compact(ctx)) // 2 blocks exist now. [0, 120), [250, 360) - require.Equal(t, len(db.Blocks()), 2) + require.Len(t, db.Blocks(), 2) require.Equal(t, int64(0), db.Blocks()[0].MinTime()) require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime()) @@ -4713,19 +4712,19 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) { // If the normal Head is not compacted, the OOO head compaction does not take place. require.NoError(t, db.Compact(ctx)) - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Add more in-order samples in future that would trigger the compaction. addSamples(400, 450) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // Compacts normal and OOO head. require.NoError(t, db.Compact(ctx)) // 2 blocks exist now. [0, 120), [250, 360) - require.Equal(t, len(db.Blocks()), 2) + require.Len(t, db.Blocks(), 2) require.Equal(t, int64(0), db.Blocks()[0].MinTime()) require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime()) @@ -4809,7 +4808,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Equal(t, 2, len(ms.ooo.oooMmappedChunks)) + require.Len(t, ms.ooo.oooMmappedChunks, 2) require.NotNil(t, ms.ooo.oooHeadChunk) } @@ -4828,7 +4827,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Equal(t, 2, len(ms.ooo.oooMmappedChunks)) + require.Len(t, ms.ooo.oooMmappedChunks, 2) require.Equal(t, 109*time.Minute.Milliseconds(), ms.ooo.oooMmappedChunks[1].maxTime) require.Nil(t, ms.ooo.oooHeadChunk) // Because of missing wbl. } @@ -4857,9 +4856,9 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) { verifySamples(90, 109) // Compaction should also work fine. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.CompactOOOHead(ctx)) - require.Equal(t, len(db.Blocks()), 1) // One block from OOO data. + require.Len(t, db.Blocks(), 1) // One block from OOO data. 
require.Equal(t, int64(0), db.Blocks()[0].MinTime()) require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime()) @@ -4878,7 +4877,6 @@ func Test_Querier_OOOQuery(t *testing.T) { opts := DefaultOptions() opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() - opts.AllowOverlappingCompaction = false series1 := labels.FromStrings("foo", "bar1") @@ -4952,7 +4950,7 @@ func Test_Querier_OOOQuery(t *testing.T) { seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) require.NotNil(t, seriesSet[series1.String()]) - require.Equal(t, 1, len(seriesSet)) + require.Len(t, seriesSet, 1) require.Equal(t, expSamples, seriesSet[series1.String()]) require.GreaterOrEqual(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch") }) @@ -4963,7 +4961,6 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { opts := DefaultOptions() opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() - opts.AllowOverlappingCompaction = false series1 := labels.FromStrings("foo", "bar1") @@ -5037,7 +5034,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { chks := queryChunks(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")) require.NotNil(t, chks[series1.String()]) - require.Equal(t, 1, len(chks)) + require.Len(t, chks, 1) require.Equal(t, float64(oooSamples), prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended), "number of ooo appended samples mismatch") var gotSamples []chunks.Sample for _, chunk := range chks[series1.String()] { @@ -5312,7 +5309,7 @@ func TestWBLAndMmapReplay(t *testing.T) { s1MmapSamples = append(s1MmapSamples, sample{t: ts, f: val}) } } - require.Greater(t, len(s1MmapSamples), 0) + require.NotEmpty(t, s1MmapSamples) require.NoError(t, db.Close()) @@ -5461,7 +5458,7 @@ func TestOOOCompactionFailure(t *testing.T) { addSample(90, 310) // No blocks before compaction. - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // There is a 0th WBL file. verifyFirstWBLFileIs0 := func(count int) { @@ -5494,7 +5491,7 @@ func TestOOOCompactionFailure(t *testing.T) { for i := 0; i < 5; i++ { require.Error(t, db.CompactOOOHead(ctx)) } - require.Equal(t, len(db.Blocks()), 0) + require.Empty(t, db.Blocks()) // M-map files don't change after failed compaction. verifyMmapFiles("000001") @@ -5505,7 +5502,7 @@ func TestOOOCompactionFailure(t *testing.T) { db.compactor = originalCompactor require.NoError(t, db.CompactOOOHead(ctx)) oldBlocks := db.Blocks() - require.Equal(t, len(db.Blocks()), 3) + require.Len(t, db.Blocks(), 3) // Check that the ooo chunks were removed. ms, created, err := db.head.getOrCreate(series1.Hash(), series1) @@ -5516,7 +5513,7 @@ func TestOOOCompactionFailure(t *testing.T) { // The failed compaction should not have left the ooo Head corrupted. // Hence, expect no new blocks with another OOO compaction call. require.NoError(t, db.CompactOOOHead(ctx)) - require.Equal(t, len(db.Blocks()), 3) + require.Len(t, db.Blocks(), 3) require.Equal(t, oldBlocks, db.Blocks()) // There should be a single m-map file @@ -5558,7 +5555,7 @@ func TestOOOCompactionFailure(t *testing.T) { // Since this is a forced compaction, this block is not aligned with 2h. 
err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds())) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), 4) // [0, 120), [120, 240), [240, 360), [250, 351) + require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351) verifySamples(db.Blocks()[3], 250, 350) // The compaction also clears out the old m-map files. Including @@ -5903,9 +5900,9 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { require.NoError(t, err) require.Greater(t, size, int64(0)) - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.compactOOOHead(ctx)) - require.Greater(t, len(db.Blocks()), 0) + require.NotEmpty(t, db.Blocks()) // WBL is empty. size, err = db.head.wbl.Size() @@ -5925,7 +5922,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO of 59m old fails. s := addSamples(t, db, 251, 260, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) oldWblPtr := fmt.Sprintf("%p", db.head.wbl) @@ -5960,7 +5957,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO of 49m old fails. s := addSamples(t, db, 261, 270, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) // WBL does not change. newWblPtr := fmt.Sprintf("%p", db.head.wbl) @@ -5991,7 +5988,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO fails. s := addSamples(t, db, 251, 260, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) require.Nil(t, db.head.wbl) @@ -6028,7 +6025,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO within old time window fails. s := addSamples(t, db, 290, 309, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) // WBL does not change and is not removed. newWblPtr := fmt.Sprintf("%p", db.head.wbl) @@ -6050,7 +6047,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO fails. s := addSamples(t, db, 290, 309, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) require.Nil(t, db.head.wbl) @@ -6060,7 +6057,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) { // OOO still fails. s = addSamples(t, db, 290, 309, false, nil) - require.Len(t, s, 0) + require.Empty(t, s) verifySamples(t, db, allSamples) require.Nil(t, db.head.wbl) }) @@ -6320,7 +6317,7 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) { db.DisableCompactions() ms := db.head.series.getByHash(series1.Hash(), series1) - require.Greater(t, len(ms.ooo.oooMmappedChunks), 0, "OOO mmap chunk was not replayed") + require.NotEmpty(t, ms.ooo.oooMmappedChunks, "OOO mmap chunk was not replayed") checkMmapFileContents := func(contains, notContains []string) { mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot) @@ -6392,8 +6389,8 @@ func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) { var err error app := db.Appender(ctx) if floatHistogram { - _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat()) - efh := h.ToFloat() + _, err = app.AppendHistogram(0, lbls, minute(tsMinute), nil, h.ToFloat(nil)) + efh := h.ToFloat(nil) efh.CounterResetHint = expCRH *exp = append(*exp, sample{t: minute(tsMinute), fh: efh}) } else { @@ -6647,7 +6644,6 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { t.Helper() opts := DefaultOptions() - opts.AllowOverlappingCompaction = true // TODO(jesusvazquez): This replaced AllowOverlappingBlocks, make sure that works. 
db := openTestDB(t, opts, nil) t.Cleanup(func() { require.NoError(t, db.Close()) @@ -6684,7 +6680,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) { } } - require.Len(t, db.Blocks(), 0) + require.Empty(t, db.Blocks()) require.NoError(t, db.reload()) require.Len(t, db.Blocks(), len(blockSeries)) @@ -6814,20 +6810,20 @@ func TestNativeHistogramFlag(t *testing.T) { // Disabled by default. _, err = app.AppendHistogram(0, l, 100, h, nil) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) - _, err = app.AppendHistogram(0, l, 105, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, 105, nil, h.ToFloat(nil)) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) // Enable and append. db.EnableNativeHistograms() _, err = app.AppendHistogram(0, l, 200, h, nil) require.NoError(t, err) - _, err = app.AppendHistogram(0, l, 205, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, 205, nil, h.ToFloat(nil)) require.NoError(t, err) db.DisableNativeHistograms() _, err = app.AppendHistogram(0, l, 300, h, nil) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) - _, err = app.AppendHistogram(0, l, 305, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, 305, nil, h.ToFloat(nil)) require.Equal(t, storage.ErrNativeHistogramsDisabled, err) require.NoError(t, app.Commit()) @@ -6836,7 +6832,7 @@ func TestNativeHistogramFlag(t *testing.T) { require.NoError(t, err) act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) require.Equal(t, map[string][]chunks.Sample{ - l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat()}}, + l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat(nil)}}, }, act) } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 8eaf42653..805de70da 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "sync" "unicode/utf8" @@ -363,7 +364,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp err := ce.validateExemplar(seriesLabels, e, true) if err != nil { - if err == storage.ErrDuplicateExemplar { + if errors.Is(err, storage.ErrDuplicateExemplar) { // Duplicate exemplar, noop. 
 		return nil
 	}
diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go
index 64121cbfc..24b46e0fa 100644
--- a/tsdb/exemplar_test.go
+++ b/tsdb/exemplar_test.go
@@ -88,7 +88,7 @@ func TestAddExemplar(t *testing.T) {
 	}
 	require.NoError(t, es.AddExemplar(l, e))
-	require.Equal(t, es.index[string(l.Bytes(nil))].newest, 0, "exemplar was not stored correctly")
+	require.Equal(t, 0, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly")
 	e2 := exemplar.Exemplar{
 		Labels: labels.FromStrings("traceID", "zxcvb"),
@@ -97,7 +97,7 @@ func TestAddExemplar(t *testing.T) {
 	}
 	require.NoError(t, es.AddExemplar(l, e2))
-	require.Equal(t, es.index[string(l.Bytes(nil))].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
+	require.Equal(t, 1, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
 	require.True(t, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar)
 	require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar")
@@ -145,7 +145,7 @@ func TestStorageOverflow(t *testing.T) {
 	require.NoError(t, err, "error creating label matcher for exemplar query")
 	ret, err := es.Select(100, 110, []*labels.Matcher{m})
 	require.NoError(t, err)
-	require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
+	require.Len(t, ret, 1, "select should have returned samples for a single series only")
 	require.True(t, reflect.DeepEqual(eList[1:], ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", eList[1:], ret[0].Exemplars)
 }
@@ -171,7 +171,7 @@ func TestSelectExemplar(t *testing.T) {
 	require.NoError(t, err, "error creating label matcher for exemplar query")
 	ret, err := es.Select(0, 100, []*labels.Matcher{m})
 	require.NoError(t, err)
-	require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
+	require.Len(t, ret, 1, "select should have returned samples for a single series only")
 	expectedResult := []exemplar.Exemplar{e}
 	require.True(t, reflect.DeepEqual(expectedResult, ret[0].Exemplars), "select did not return expected exemplars\n\texpected: %+v\n\tactual: %+v\n", expectedResult, ret[0].Exemplars)
@@ -209,15 +209,15 @@ func TestSelectExemplar_MultiSeries(t *testing.T) {
 	require.NoError(t, err, "error creating label matcher for exemplar query")
 	ret, err := es.Select(100, 200, []*labels.Matcher{m})
 	require.NoError(t, err)
-	require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
-	require.True(t, len(ret[0].Exemplars) == 3, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars))
+	require.Len(t, ret, 1, "select should have returned samples for a single series only")
+	require.Len(t, ret[0].Exemplars, 3, "didn't get expected 3 exemplars, got %d", len(ret[0].Exemplars))
 	m, err = labels.NewMatcher(labels.MatchEqual, labels.MetricName, l1Name)
 	require.NoError(t, err, "error creating label matcher for exemplar query")
 	ret, err = es.Select(100, 200, []*labels.Matcher{m})
 	require.NoError(t, err)
-	require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
-	require.True(t, len(ret[0].Exemplars) == 2, "didn't get expected 8 exemplars, got %d", len(ret[0].Exemplars))
+	require.Len(t, ret, 1, "select should have returned samples for a single series only")
+	require.Len(t, ret[0].Exemplars, 2, "didn't get expected 2 exemplars, got %d", len(ret[0].Exemplars))
 }
 func TestSelectExemplar_TimeRange(t *testing.T) {
@@ -243,8 +243,8 @@ func TestSelectExemplar_TimeRange(t *testing.T) {
 	require.NoError(t, err, "error creating label matcher for exemplar query")
 	ret, err := es.Select(102, 104, []*labels.Matcher{m})
 	require.NoError(t, err)
-	require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
-	require.True(t, len(ret[0].Exemplars) == 3, "didn't get expected two exemplars %d, %+v", len(ret[0].Exemplars), ret)
+	require.Len(t, ret, 1, "select should have returned samples for a single series only")
+	require.Len(t, ret[0].Exemplars, 3, "didn't get expected three exemplars, got %d, %+v", len(ret[0].Exemplars), ret)
 }
 // Test to ensure that even though a series matches more than one matcher from the
@@ -281,7 +281,7 @@ func TestSelectExemplar_DuplicateSeries(t *testing.T) {
 	ret, err := es.Select(0, 100, m...)
 	require.NoError(t, err)
-	require.True(t, len(ret) == 1, "select should have returned samples for a single series only")
+	require.Len(t, ret, 1, "select should have returned samples for a single series only")
 }
 func TestIndexOverwrite(t *testing.T) {
diff --git a/tsdb/head.go b/tsdb/head.go
index 3ff2bee71..63d8e9ea1 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -15,6 +15,7 @@ package tsdb
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -27,7 +28,6 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/oklog/ulid"
-	"github.com/pkg/errors"
 	"go.uber.org/atomic"
 	"github.com/prometheus/client_golang/prometheus"
@@ -149,6 +149,10 @@ type HeadOptions struct {
 	// EnableNativeHistograms enables the ingestion of native histograms.
 	EnableNativeHistograms atomic.Bool
+	// EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample.
+	// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
+	EnableCreatedTimestampZeroIngestion bool
+
 	ChunkRange int64
 	// ChunkDirRoot is the parent directory of the chunks directory.
 	ChunkDirRoot string
@@ -619,11 +623,11 @@ func (h *Head) Init(minValidTime int64) error {
 	if h.wal != nil {
 		_, endAt, err := wlog.Segments(h.wal.Dir())
 		if err != nil {
-			return errors.Wrap(err, "finding WAL segments")
+			return fmt.Errorf("finding WAL segments: %w", err)
 		}
 		_, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot)
-		if err != nil && err != record.ErrNotFound {
+		if err != nil && !errors.Is(err, record.ErrNotFound) {
 			level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err)
 		}
@@ -670,7 +674,8 @@ func (h *Head) Init(minValidTime int64) error {
 		if err != nil {
 			// TODO(codesome): clear out all m-map chunks here for refSeries.
 			level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
-			if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
+			var cerr *chunks.CorruptionErr
+			if errors.As(err, &cerr) {
 				h.metrics.mmapChunkCorruptionTotal.Inc()
 			}
@@ -697,14 +702,14 @@ func (h *Head) Init(minValidTime int64) error {
 	checkpointReplayStart := time.Now()
 	// Backfill the checkpoint first if it exists.
 	dir, startFrom, err := wlog.LastCheckpoint(h.wal.Dir())
-	if err != nil && err != record.ErrNotFound {
-		return errors.Wrap(err, "find last checkpoint")
+	if err != nil && !errors.Is(err, record.ErrNotFound) {
+		return fmt.Errorf("find last checkpoint: %w", err)
 	}
 	// Find the last segment.
_, endAt, e := wlog.Segments(h.wal.Dir()) if e != nil { - return errors.Wrap(e, "finding WAL segments") + return fmt.Errorf("finding WAL segments: %w", e) } h.startWALReplayStatus(startFrom, endAt) @@ -713,7 +718,7 @@ func (h *Head) Init(minValidTime int64) error { if err == nil && startFrom >= snapIdx { sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return errors.Wrap(err, "open checkpoint") + return fmt.Errorf("open checkpoint: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -724,7 +729,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. if err := h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { - return errors.Wrap(err, "backfill checkpoint") + return fmt.Errorf("backfill checkpoint: %w", err) } h.updateWALReplayStatusRead(startFrom) startFrom++ @@ -741,7 +746,7 @@ func (h *Head) Init(minValidTime int64) error { for i := startFrom; i <= endAt; i++ { s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i)) if err != nil { - return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + return fmt.Errorf("open WAL segment: %d: %w", i, err) } offset := 0 @@ -754,7 +759,7 @@ func (h *Head) Init(minValidTime int64) error { continue } if err != nil { - return errors.Wrapf(err, "segment reader (offset=%d)", offset) + return fmt.Errorf("segment reader (offset=%d): %w", offset, err) } err = h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { @@ -773,14 +778,14 @@ func (h *Head) Init(minValidTime int64) error { // Replay WBL. startFrom, endAt, e = wlog.Segments(h.wbl.Dir()) if e != nil { - return &errLoadWbl{errors.Wrap(e, "finding WBL segments")} + return &errLoadWbl{fmt.Errorf("finding WBL segments: %w", e)} } h.startWALReplayStatus(startFrom, endAt) for i := startFrom; i <= endAt; i++ { s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i)) if err != nil { - return &errLoadWbl{errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i))} + return &errLoadWbl{fmt.Errorf("open WBL segment: %d: %w", i, err)} } sr := wlog.NewSegmentBufReader(s) @@ -901,7 +906,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) return nil }); err != nil { // secondLastRef because the lastRef caused an error. - return nil, nil, secondLastRef, errors.Wrap(err, "iterate on on-disk chunks") + return nil, nil, secondLastRef, fmt.Errorf("iterate on on-disk chunks: %w", err) } return mmappedChunks, oooMmappedChunks, lastRef, nil } @@ -1220,12 +1225,12 @@ func (h *Head) truncateWAL(mint int64) error { first, last, err := wlog.Segments(h.wal.Dir()) if err != nil { - return errors.Wrap(err, "get segment range") + return fmt.Errorf("get segment range: %w", err) } // Start a new segment, so low ingestion volume TSDB don't have more WAL than // needed. if _, err := h.wal.NextSegment(); err != nil { - return errors.Wrap(err, "next segment") + return fmt.Errorf("next segment: %w", err) } last-- // Never consider last segment for checkpoint. 
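// The github.com/pkg/errors removal in this file (and in head_append.go,
// head_read.go and head_wal.go below) follows one mechanical pattern; a minimal
// sketch of the three substitutions, where T stands for a concrete error type
// such as *chunks.CorruptionErr:
//
//	return errors.Wrap(err, "context")        // before
//	return fmt.Errorf("context: %w", err)     // after
//
//	if err == record.ErrNotFound {}           // before
//	if errors.Is(err, record.ErrNotFound) {}  // after
//
//	_, ok := errors.Cause(err).(*T)           // before
//	var target *T                             // after
//	ok := errors.As(err, &target)
//
// errors.Is and errors.As unwrap the whole %w chain, so behaviour is preserved
// even for errors that have been wrapped more than once, which errors.Cause
// only guaranteed for pkg/errors wrappers.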
 	if last < 0 {
@@ -1252,10 +1257,11 @@ func (h *Head) truncateWAL(mint int64) error {
 	h.metrics.checkpointCreationTotal.Inc()
 	if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil {
 		h.metrics.checkpointCreationFail.Inc()
-		if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok {
+		var cerr *wlog.CorruptionErr
+		if errors.As(err, &cerr) {
 			h.metrics.walCorruptionsTotal.Inc()
 		}
-		return errors.Wrap(err, "create checkpoint")
+		return fmt.Errorf("create checkpoint: %w", err)
 	}
 	if err := h.wal.Truncate(last + 1); err != nil {
 		// If truncating fails, we'll just try again at the next checkpoint.
@@ -1348,7 +1354,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error {
 	// Truncate the chunk m-mapper.
 	if err := h.chunkDiskMapper.Truncate(uint32(minMmapFile)); err != nil {
-		return errors.Wrap(err, "truncate chunks.HeadReadWriter by file number")
+		return fmt.Errorf("truncate chunks.HeadReadWriter by file number: %w", err)
 	}
 	return nil
 }
@@ -1463,13 +1469,13 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match
 	p, err := PostingsForMatchers(ctx, ir, ms...)
 	if err != nil {
-		return errors.Wrap(err, "select series")
+		return fmt.Errorf("select series: %w", err)
 	}
 	var stones []tombstones.Stone
 	for p.Next() {
 		if err := ctx.Err(); err != nil {
-			return errors.Wrap(err, "select series")
+			return fmt.Errorf("select series: %w", err)
 		}
 		series := h.series.getByID(chunks.HeadSeriesRef(p.At()))
@@ -1491,8 +1497,8 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match
 	if p.Err() != nil {
 		return p.Err()
 	}
-	if ctx.Err() != nil {
-		return errors.Wrap(err, "select series")
+	if err := ctx.Err(); err != nil {
+		return fmt.Errorf("select series: %w", err)
 	}
 	if h.wal != nil {
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index be53a4f3f..f509317c8 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -15,11 +15,11 @@ package tsdb
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
@@ -87,6 +87,17 @@ func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m
 	return a.app.UpdateMetadata(ref, l, m)
 }
+func (a *initAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) {
+	if a.app != nil {
+		return a.app.AppendCTZeroSample(ref, lset, t, ct)
+	}
+
+	a.head.initTime(t)
+	a.app = a.head.appender()
+
+	return a.app.AppendCTZeroSample(ref, lset, t, ct)
+}
+
 // initTime initializes a head with the first timestamp. This only needs to be called
 // for a completely fresh head with an empty WAL.
 func (h *Head) initTime(t int64) {
@@ -319,28 +330,11 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
 	if s == nil {
-		// Ensure no empty labels have gotten through.
-		lset = lset.WithoutEmpty()
-		if lset.IsEmpty() {
-			return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
-		}
-
-		if l, dup := lset.HasDuplicateLabelNames(); dup {
-			return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
-		}
-
-		var created bool
 		var err error
-		s, created, err = a.head.getOrCreate(lset.Hash(), lset)
+		s, err = a.getOrCreate(lset)
 		if err != nil {
 			return 0, err
 		}
-		if created {
-			a.series = append(a.series, record.RefSeries{
-				Ref:    s.ref,
-				Labels: lset,
-			})
-		}
 	}
 	if value.IsStaleNaN(v) {
@@ -364,10 +358,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 		a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
 	}
 	if err != nil {
-		switch err {
-		case storage.ErrOutOfOrderSample:
+		switch {
+		case errors.Is(err, storage.ErrOutOfOrderSample):
 			a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
-		case storage.ErrTooOldSample:
+		case errors.Is(err, storage.ErrTooOldSample):
 			a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
 		}
 		return 0, err
@@ -389,6 +383,69 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 	return storage.SeriesRef(s.ref), nil
 }
+// AppendCTZeroSample appends a synthetic zero sample at the ct timestamp. It returns
+// an error when the sample can't be appended. See
+// storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation.
+func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) {
+	if ct >= t {
+		return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring")
+	}
+
+	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
+	if s == nil {
+		var err error
+		s, err = a.getOrCreate(lset)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// Check whether the CT sample would be OOO vs samples we might already have for this series.
+	// NOTE(bwplotka): This will often be hit, as it is expected for long-living
+	// counters to share the same CT.
+	s.Lock()
+	isOOO, _, err := s.appendable(ct, 0, a.headMaxt, a.minValidTime, a.oooTimeWindow)
+	if err == nil {
+		s.pendingCommit = true
+	}
+	s.Unlock()
+	if err != nil {
+		return 0, err
+	}
+	if isOOO {
+		return storage.SeriesRef(s.ref), storage.ErrOutOfOrderCT
+	}
+
+	if ct > a.maxt {
+		a.maxt = ct
+	}
+	a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
+	a.sampleSeries = append(a.sampleSeries, s)
+	return storage.SeriesRef(s.ref), nil
+}
+
+func (a *headAppender) getOrCreate(lset labels.Labels) (*memSeries, error) {
+	// Ensure no empty labels have gotten through.
+	lset = lset.WithoutEmpty()
+	if lset.IsEmpty() {
+		return nil, fmt.Errorf("empty labelset: %w", ErrInvalidSample)
+	}
+	if l, dup := lset.HasDuplicateLabelNames(); dup {
+		return nil, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample)
+	}
+	s, created, err := a.head.getOrCreate(lset.Hash(), lset)
+	if err != nil {
+		return nil, err
+	}
+	if created {
+		a.series = append(a.series, record.RefSeries{
+			Ref:    s.ref,
+			Labels: lset,
+		})
+	}
+	return s, nil
+}
+
 // appendable checks whether the given sample is valid for appending to the series. (if we return false and no error)
 // The sample belongs to the out of order chunk if we return true and no error.
 // An error signifies the sample cannot be handled.
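// A minimal sketch of how a caller is expected to drive the new
// AppendCTZeroSample, mirroring the TestHeadAppender_AppendCTZeroSample test
// added in head_test.go below; app, lbls, ts, ct and v are illustrative names,
// not part of the API:
//
//	app := head.Appender(ctx)
//	// Try to materialize the created timestamp as a synthetic zero sample first.
//	// storage.ErrOutOfOrderCT is the expected outcome whenever the CT is not
//	// newer than data already present for the series (e.g. a long-living counter
//	// re-reporting the same CT on every scrape), so it can be skipped over.
//	if _, err := app.AppendCTZeroSample(0, lbls, ts, ct); err != nil &&
//		!errors.Is(err, storage.ErrOutOfOrderCT) {
//		return err
//	}
//	// Then append the real sample as usual.
//	if _, err := app.Append(0, lbls, ts, v); err != nil {
//		return err
//	}
//	return app.Commit()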
@@ -498,7 +557,7 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, err := a.head.exemplars.ValidateExemplar(s.lset, e) if err != nil { - if err == storage.ErrDuplicateExemplar || err == storage.ErrExemplarsDisabled { + if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) { // Duplicate, don't return an error but don't accept the exemplar. return 0, nil } @@ -537,11 +596,11 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels // Ensure no empty labels have gotten through. lset = lset.WithoutEmpty() if lset.IsEmpty() { - return 0, errors.Wrap(ErrInvalidSample, "empty labelset") + return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample) } if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) + return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample) } var created bool @@ -569,7 +628,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels s.Lock() if err := s.appendableHistogram(t, h); err != nil { s.Unlock() - if err == storage.ErrOutOfOrderSample { + if errors.Is(err, storage.ErrOutOfOrderSample) { a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err @@ -586,7 +645,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels s.Lock() if err := s.appendableFloatHistogram(t, fh); err != nil { s.Unlock() - if err == storage.ErrOutOfOrderSample { + if errors.Is(err, storage.ErrOutOfOrderSample) { a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc() } return 0, err @@ -670,7 +729,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } } if len(a.metadata) > 0 { @@ -678,7 +737,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log metadata") + return fmt.Errorf("log metadata: %w", err) } } if len(a.samples) > 0 { @@ -686,21 +745,21 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log samples") + return fmt.Errorf("log samples: %w", err) } } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log histograms") + return fmt.Errorf("log histograms: %w", err) } } if len(a.floatHistograms) > 0 { rec = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log float histograms") + return fmt.Errorf("log float histograms: %w", err) } } // Exemplars should be logged after samples (float/native histogram/etc), @@ -712,7 +771,7 @@ func (a *headAppender) log() error { buf = rec[:0] if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log exemplars") + return fmt.Errorf("log exemplars: %w", err) } } return nil @@ -741,7 +800,7 @@ func (a *headAppender) Commit() (err error) { if err := a.log(); err != nil { _ = a.Rollback() // Most likely the same error will happen again. 
- return errors.Wrap(err, "write to WAL") + return fmt.Errorf("write to WAL: %w", err) } if a.head.writeNotified != nil { @@ -759,7 +818,7 @@ func (a *headAppender) Commit() (err error) { } // We don't instrument exemplar appends here, all is instrumented by storage. if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { - if err == storage.ErrOutOfOrderExemplar { + if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) @@ -839,16 +898,16 @@ func (a *headAppender) Commit() (err error) { series.Lock() oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch err { - case nil: + switch { + case err == nil: // Do nothing. - case storage.ErrOutOfOrderSample: + case errors.Is(err, storage.ErrOutOfOrderSample): samplesAppended-- oooRejected++ - case storage.ErrOutOfBounds: + case errors.Is(err, storage.ErrOutOfBounds): samplesAppended-- oobRejected++ - case storage.ErrTooOldSample: + case errors.Is(err, storage.ErrTooOldSample): samplesAppended-- tooOldRejected++ default: @@ -1428,7 +1487,7 @@ func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count i } func handleChunkWriteError(err error) { - if err != nil && err != chunks.ErrChunkDiskMapperClosed { + if err != nil && !errors.Is(err, chunks.ErrChunkDiskMapperClosed) { panic(err) } } diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go index 8fdf94db0..a03794810 100644 --- a/tsdb/head_bench_test.go +++ b/tsdb/head_bench_test.go @@ -14,10 +14,10 @@ package tsdb import ( + "errors" "strconv" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/atomic" diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 35ef26a58..362764480 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -15,11 +15,12 @@ package tsdb import ( "context" + "errors" + "fmt" "math" "sync" "github.com/go-kit/log/level" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -133,7 +134,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { } } if err := p.Err(); err != nil { - return index.ErrPostings(errors.Wrap(err, "expand postings")) + return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } slices.SortFunc(series, func(a, b *memSeries) int { @@ -388,7 +389,8 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi if ix < len(s.mmappedChunks) { chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { panic(err) } return nil, false, false, err @@ -516,14 +518,15 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime) } if err != nil { - return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") + return nil, fmt.Errorf("failed to convert ooo head chunk to xor chunk: %w", err) } iterable = xor } else { chk, err := cdm.Chunk(c.ref) if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { - return nil, errors.Wrap(err, "invalid ooo mmapped chunk") + var cerr *chunks.CorruptionErr + if errors.As(err, &cerr) { + return nil, fmt.Errorf("invalid ooo mmapped chunk: %w", err) } return nil, err } diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go index 
4f19db4c1..b06351201 100644 --- a/tsdb/head_read_test.go +++ b/tsdb/head_read_test.go @@ -152,7 +152,7 @@ func TestBoundedChunk(t *testing.T) { // it.Next() should keep returning no value. for i := 0; i < 10; i++ { - require.True(t, it.Next() == chunkenc.ValNone) + require.Equal(t, chunkenc.ValNone, it.Next()) } require.Equal(t, tc.expSamples, samples) @@ -216,8 +216,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "firstChunkID > ix", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.firstChunkID = 5 @@ -229,8 +229,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=0 on memSeries with no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -241,8 +241,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=1 on memSeries with no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -253,8 +253,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=10 on memSeries with no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, chunkRange-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -267,7 +267,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of 
headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -280,7 +280,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -293,7 +293,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -306,7 +306,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.headChunks = nil @@ -320,7 +320,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.headChunks = nil @@ -334,7 +334,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") s.headChunks = nil @@ -348,7 +348,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong 
maxTime on first headChunks element") cdm.Close() @@ -362,7 +362,7 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*4)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") cdm.Close() @@ -374,8 +374,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=0 on memSeries with 3 head chunks and no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange*3, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -386,8 +386,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=1 on memSeries with 3 head chunks and no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange*3, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -398,8 +398,8 @@ func TestMemSeries_chunk(t *testing.T) { name: "call ix=10 on memSeries with 3 head chunks and no mmapped chunks", setup: func(t *testing.T, s *memSeries, cdm *chunks.ChunkDiskMapper) { appendSamples(t, s, 0, chunkRange*3, cdm) - require.Len(t, s.mmappedChunks, 0, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Empty(t, s.mmappedChunks, "wrong number of mmappedChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Equal(t, int64(0), s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*3)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") }, @@ -412,10 +412,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") 
require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -429,10 +429,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -446,10 +446,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -463,10 +463,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -480,10 +480,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") @@ -498,10 +498,10 @@ func TestMemSeries_chunk(t *testing.T) { appendSamples(t, s, 0, chunkRange*4, cdm) s.mmapChunks(cdm) require.Len(t, s.mmappedChunks, 3, "wrong number of 
mmappedChunks") - require.Equal(t, s.headChunks.len(), 1, "wrong number of headChunks") + require.Equal(t, 1, s.headChunks.len(), "wrong number of headChunks") appendSamples(t, s, chunkRange*4, chunkRange*6, cdm) - require.Equal(t, s.headChunks.len(), 3, "wrong number of headChunks") + require.Equal(t, 3, s.headChunks.len(), "wrong number of headChunks") require.Len(t, s.mmappedChunks, 3, "wrong number of mmappedChunks") require.Equal(t, chunkRange*3, s.headChunks.oldest().minTime, "wrong minTime on last headChunks element") require.Equal(t, (chunkRange*6)-chunkStep, s.headChunks.maxTime, "wrong maxTime on first headChunks element") diff --git a/tsdb/head_test.go b/tsdb/head_test.go index fd8dd024e..5c2749bed 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -30,9 +30,9 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -697,7 +697,7 @@ func TestHead_ReadWAL(t *testing.T) { require.NoError(t, err) e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")}) require.NoError(t, err) - require.Equal(t, e[0].Exemplars[0], exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")}) + require.Equal(t, exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")}, e[0].Exemplars[0]) }) } } @@ -1086,12 +1086,12 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) { } else { require.Nil(t, series.headChunks, "head chunk is present") } - require.Equal(t, tc.mmappedChunks, len(series.mmappedChunks), "wrong number of mmapped chunks") + require.Len(t, series.mmappedChunks, tc.mmappedChunks, "wrong number of mmapped chunks") truncated := series.truncateChunksBefore(tc.truncateBefore, 0) require.Equal(t, tc.expectedTruncated, truncated, "wrong number of truncated chunks returned") - require.Equal(t, tc.expectedMmap, len(series.mmappedChunks), "wrong number of mmappedChunks after truncation") + require.Len(t, series.mmappedChunks, tc.expectedMmap, "wrong number of mmappedChunks after truncation") if tc.expectedHead > 0 { require.NotNil(t, series.headChunks, "headChunks should is nil after truncation") @@ -1256,7 +1256,7 @@ func TestHeadDeleteSimple(t *testing.T) { if !eok { require.NoError(t, h.Close()) require.NoError(t, actSeriesSet.Err()) - require.Equal(t, 0, len(actSeriesSet.Warnings())) + require.Empty(t, actSeriesSet.Warnings()) continue Outer } expSeries := expSeriesSet.At() @@ -1304,7 +1304,7 @@ func TestDeleteUntilCurMax(t *testing.T) { for res.Next() { } require.NoError(t, res.Err()) - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) // Add again and test for presence. 
app = hb.Appender(context.Background()) @@ -1323,7 +1323,7 @@ func TestDeleteUntilCurMax(t *testing.T) { for res.Next() { } require.NoError(t, res.Err()) - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) } func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { @@ -1524,7 +1524,7 @@ func TestDelete_e2e(t *testing.T) { require.Equal(t, smplExp, smplRes) } require.NoError(t, ss.Err()) - require.Equal(t, 0, len(ss.Warnings())) + require.Empty(t, ss.Warnings()) } } } @@ -1654,7 +1654,7 @@ func TestMemSeries_append(t *testing.T) { require.False(t, chunkCreated, "second sample should use same chunk") s.mmapChunks(chunkDiskMapper) - require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") + require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") @@ -1721,7 +1721,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.False(t, chunkCreated, "second sample should use same chunk") s.mmapChunks(chunkDiskMapper) - require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") + require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") @@ -1732,7 +1732,7 @@ func TestMemSeries_appendHistogram(t *testing.T) { require.False(t, chunkCreated, "third sample should trigger a re-encoded chunk") s.mmapChunks(chunkDiskMapper) - require.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk") + require.Len(t, s.mmappedChunks, 1, "there should be only 1 mmapped chunk") require.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range") require.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range") require.Equal(t, int64(1000), s.headChunks.minTime, "wrong chunk range") @@ -1831,7 +1831,7 @@ func TestGCChunkAccess(t *testing.T) { require.NoError(t, idx.Series(1, &builder, &chunks)) require.Equal(t, labels.FromStrings("a", "1"), builder.Labels()) - require.Equal(t, 2, len(chunks)) + require.Len(t, chunks, 2) cr, err := h.chunksRange(0, 1500, nil) require.NoError(t, err) @@ -1890,7 +1890,7 @@ func TestGCSeriesAccess(t *testing.T) { require.NoError(t, idx.Series(1, &builder, &chunks)) require.Equal(t, labels.FromStrings("a", "1"), builder.Labels()) - require.Equal(t, 2, len(chunks)) + require.Len(t, chunks, 2) cr, err := h.chunksRange(0, 2000, nil) require.NoError(t, err) @@ -1932,11 +1932,11 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) { defer q.Close() ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1")) - require.Equal(t, true, ss.Next()) + require.True(t, ss.Next()) for ss.Next() { } require.NoError(t, ss.Err()) - require.Equal(t, 0, len(ss.Warnings())) + require.Empty(t, ss.Warnings()) } func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { @@ -1961,8 +1961,8 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) { require.NoError(t, err) ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1")) - require.Equal(t, false, ss.Next()) - require.Equal(t, 0, 
len(ss.Warnings())) + require.False(t, ss.Next()) + require.Empty(t, ss.Warnings()) require.NoError(t, q.Close()) // Truncate again, this time the series should be deleted @@ -1985,7 +1985,7 @@ func TestHead_LogRollback(t *testing.T) { require.NoError(t, app.Rollback()) recs := readTestWAL(t, w.Dir()) - require.Equal(t, 1, len(recs)) + require.Len(t, recs, 1) series, ok := recs[0].([]record.RefSeries) require.True(t, ok, "expected series record but got %+v", recs[0]) @@ -2055,9 +2055,8 @@ func TestWalRepair_DecodingError(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal)) initErr := h.Init(math.MinInt64) - err = errors.Cause(initErr) // So that we can pick up errors even if wrapped. - _, corrErr := err.(*wlog.CorruptionErr) - require.True(t, corrErr, "reading the wal didn't return corruption error") + var cerr *wlog.CorruptionErr + require.ErrorAs(t, initErr, &cerr, "reading the wal didn't return corruption error") require.NoError(t, h.Close()) // Head will close the wal as well. } @@ -2128,12 +2127,11 @@ func TestWblRepair_DecodingError(t *testing.T) { require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal)) initErr := h.Init(math.MinInt64) - _, ok := initErr.(*errLoadWbl) - require.True(t, ok) // Wbl errors are wrapped into errLoadWbl, make sure we can unwrap it. + var elb *errLoadWbl + require.ErrorAs(t, initErr, &elb) // Wbl errors are wrapped into errLoadWbl, make sure we can unwrap it. - err = errors.Cause(initErr) // So that we can pick up errors even if wrapped. - _, corrErr := err.(*wlog.CorruptionErr) - require.True(t, corrErr, "reading the wal didn't return corruption error") + var cerr *wlog.CorruptionErr + require.ErrorAs(t, initErr, &cerr, "reading the wal didn't return corruption error") require.NoError(t, h.Close()) // Head will close the wal as well. } @@ -2209,7 +2207,7 @@ func TestHeadReadWriterRepair(t *testing.T) { // take effect without another chunk being written. files, err := os.ReadDir(mmappedChunksDir(dir)) require.NoError(t, err) - require.Equal(t, 6, len(files)) + require.Len(t, files, 6) // Corrupt the 4th file by writing a random byte to series ref. f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666) @@ -2235,7 +2233,7 @@ func TestHeadReadWriterRepair(t *testing.T) { { files, err := os.ReadDir(mmappedChunksDir(dir)) require.NoError(t, err) - require.Equal(t, 3, len(files)) + require.Len(t, files, 3) } } @@ -2321,7 +2319,7 @@ func TestMemSeriesIsolation(t *testing.T) { ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) _, seriesSet, ws, err := expandSeriesSet(ss) require.NoError(t, err) - require.Equal(t, 0, len(ws)) + require.Empty(t, ws) for _, series := range seriesSet { return int(series[len(series)-1].f) @@ -2725,8 +2723,8 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { require.NoError(t, err) } require.NoError(t, app.Commit()) - require.Equal(t, head.MinTime(), firstSeriesTimestamp) - require.Equal(t, head.MaxTime(), lastSeriesTimestamp) + require.Equal(t, firstSeriesTimestamp, head.MinTime()) + require.Equal(t, lastSeriesTimestamp, head.MaxTime()) testCases := []struct { name string @@ -3007,7 +3005,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) { for benchIdx := 0; benchIdx < b.N; benchIdx++ { actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", matchers...) 
require.NoError(b, err) - require.Equal(b, 9, len(actualValues)) + require.Len(b, actualValues, 9) } } @@ -3115,7 +3113,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) { defer wg.Done() // Compacting head while the querier spans the compaction time. require.NoError(t, db.Compact(ctx)) - require.Greater(t, len(db.Blocks()), 0) + require.NotEmpty(t, db.Blocks()) }() // Give enough time for compaction to finish. @@ -3178,7 +3176,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) { defer wg.Done() // Compacting head while the querier spans the compaction time. require.NoError(t, db.Compact(ctx)) - require.Greater(t, len(db.Blocks()), 0) + require.NotEmpty(t, db.Blocks()) }() // Give enough time for compaction to finish. @@ -3907,7 +3905,7 @@ func TestSnapshotError(t *testing.T) { require.NotNil(t, head.series.getByHash(lbls.Hash(), lbls)) tm, err := head.tombstones.Get(1) require.NoError(t, err) - require.NotEqual(t, 0, len(tm)) + require.NotEmpty(t, tm) head.opts.EnableMemorySnapshotOnShutdown = true require.NoError(t, head.Close()) // This will create a snapshot. @@ -3939,7 +3937,7 @@ func TestSnapshotError(t *testing.T) { require.Nil(t, head.series.getByHash(lbls.Hash(), lbls)) tm, err = head.tombstones.Get(1) require.NoError(t, err) - require.Equal(t, 0, len(tm)) + require.Empty(t, tm) } func TestHistogramMetrics(t *testing.T) { @@ -4082,8 +4080,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) { var err error if floatHistogram { - _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat()) - expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat()}) + _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat(nil)) + expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)}) } else { _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h, nil) expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h}) @@ -4104,8 +4102,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { s := head.series.getByHash(l.Hash(), l) require.NotNil(t, s) require.NotNil(t, s.headChunks) - require.Equal(t, s.headChunks.len(), 1) - require.Equal(t, 0, len(s.mmappedChunks)) + require.Equal(t, 1, s.headChunks.len()) + require.Empty(t, s.mmappedChunks) testQuery(1) // Adding stale in different appender and continuing series after a stale sample. 
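// The h.ToFloat() -> h.ToFloat(nil) rewrites in these tests track a signature
// change in (*histogram.Histogram).ToFloat, which now takes an optional
// *FloatHistogram to convert into; passing nil keeps the old allocate-and-return
// behaviour. A hedged sketch of the reuse the parameter enables (hs and process
// are illustrative stand-ins):
//
//	var fh *histogram.FloatHistogram
//	for _, h := range hs {
//		fh = h.ToFloat(fh) // first call allocates, later calls reuse fh's memory
//		process(fh)
//	}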
@@ -4113,8 +4111,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { for _, h := range tsdbutil.GenerateTestHistograms(2 * numHistograms)[numHistograms:] { var err error if floatHistogram { - _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat()) - expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat()}) + _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), nil, h.ToFloat(nil)) + expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)}) } else { _, err = app.AppendHistogram(0, l, 100*int64(len(expHistograms)), h, nil) expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h}) @@ -4139,8 +4137,8 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { s = head.series.getByHash(l.Hash(), l) require.NotNil(t, s) require.NotNil(t, s.headChunks) - require.Equal(t, s.headChunks.len(), 1) - require.Equal(t, 1, len(s.mmappedChunks)) + require.Equal(t, 1, s.headChunks.len()) + require.Len(t, s.mmappedChunks, 1) testQuery(2) } @@ -4160,7 +4158,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { app := head.Appender(context.Background()) var err error if floatHisto { - _, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat()) + _, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat(nil)) } else { _, err = app.AppendHistogram(0, l, ts, h.Copy(), nil) } @@ -4480,7 +4478,7 @@ func TestChunkSnapshotReplayBug(t *testing.T) { labels.MustNewMatcher(labels.MatchEqual, "__name__", "request_duration"), labels.MustNewMatcher(labels.MatchNotEqual, "status_code", "200"), ) - require.Len(t, series, 0, "there should be no series found") + require.Empty(t, series, "there should be no series found") } func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { @@ -4514,7 +4512,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { // Verify the snapshot. name, idx, offset, err := LastChunkSnapshot(dir) require.NoError(t, err) - require.True(t, name != "") + require.NotEqual(t, "", name) require.Equal(t, 0, idx) require.Greater(t, offset, 0) } @@ -4873,7 +4871,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { } files, err := os.ReadDir(filepath.Join(dir, "chunks_head")) - require.Equal(t, 5, len(files)) + require.Len(t, files, 5) // Corrupt a m-map file. mmapFilePath := filepath.Join(dir, "chunks_head", "000002") @@ -4888,7 +4886,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { // There should be less m-map files due to corruption. files, err = os.ReadDir(filepath.Join(dir, "chunks_head")) - require.Equal(t, 2, len(files)) + require.Len(t, files, 2) // Querying should not panic. q, err := NewBlockQuerier(h, 0, lastTs) @@ -5641,3 +5639,93 @@ func TestPostingsCardinalityStats(t *testing.T) { // Using cache. 
require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1)) } + +func TestHeadAppender_AppendCTZeroSample(t *testing.T) { + type appendableSamples struct { + ts int64 + val float64 + ct int64 + } + for _, tc := range []struct { + name string + appendableSamples []appendableSamples + expectedSamples []model.Sample + }{ + { + name: "In order ct+normal sample", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + }, + }, + { + name: "Consecutive appends with same ct ignore ct", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + {ts: 101, val: 10, ct: 1}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + {Timestamp: 101, Value: 10}, + }, + }, + { + name: "Consecutive appends with newer ct do not ignore ct", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + {ts: 102, val: 10, ct: 101}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + {Timestamp: 101, Value: 0}, + {Timestamp: 102, Value: 10}, + }, + }, + { + name: "CT equals to previous sample timestamp is ignored", + appendableSamples: []appendableSamples{ + {ts: 100, val: 10, ct: 1}, + {ts: 101, val: 10, ct: 100}, + }, + expectedSamples: []model.Sample{ + {Timestamp: 1, Value: 0}, + {Timestamp: 100, Value: 10}, + {Timestamp: 101, Value: 10}, + }, + }, + } { + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + a := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + for _, sample := range tc.appendableSamples { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.val) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) + + q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.True(t, ss.Next()) + s := ss.At() + require.False(t, ss.Next()) + it := s.Iterator(nil) + for _, sample := range tc.expectedSamples { + require.Equal(t, chunkenc.ValFloat, it.Next()) + timestamp, value := it.At() + require.Equal(t, sample.Timestamp, model.Time(timestamp)) + require.Equal(t, sample.Value, model.SampleValue(value)) + } + require.Equal(t, chunkenc.ValNone, it.Next()) + } +} diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 07fa8280c..a492a85a0 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -14,6 +14,7 @@ package tsdb import ( + "errors" "fmt" "math" "os" @@ -24,7 +25,6 @@ import ( "time" "github.com/go-kit/log/level" - "github.com/pkg/errors" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +128,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. // At the moment the only possible error here is out of order exemplars, which we shouldn't see when // replaying the WAL, so lets just log the error if it's not that type. 
err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) - if err != nil && err == storage.ErrOutOfOrderExemplar { + if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) } } @@ -145,7 +145,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. series, err = dec.Series(rec, series) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode series"), + Err: fmt.Errorf("decode series: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -157,7 +157,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -169,7 +169,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. tstones, err = dec.Tombstones(rec, tstones) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode tombstones"), + Err: fmt.Errorf("decode tombstones: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -181,7 +181,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode exemplars"), + Err: fmt.Errorf("decode exemplars: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -193,7 +193,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. hists, err = dec.HistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode histograms"), + Err: fmt.Errorf("decode histograms: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -205,7 +205,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode float histograms"), + Err: fmt.Errorf("decode float histograms: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -217,7 +217,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. meta, err := dec.Metadata(rec, meta) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode metadata"), + Err: fmt.Errorf("decode metadata: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -416,8 +416,8 @@ Outer: close(exemplarsInput) wg.Wait() - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return fmt.Errorf("read records: %w", err) } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { @@ -708,7 +708,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. samples, err = dec.Samples(rec, samples) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), + Err: fmt.Errorf("decode samples: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -720,7 +720,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. 
markers, err = dec.MmapMarkers(rec, markers) if err != nil { decodeErr = &wlog.CorruptionErr{ - Err: errors.Wrap(err, "decode mmap markers"), + Err: fmt.Errorf("decode mmap markers: %w", err), Segment: r.Segment(), Offset: r.Offset(), } @@ -806,8 +806,8 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. } wg.Wait() - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return fmt.Errorf("read records: %w", err) } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { @@ -995,7 +995,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh chk, err := chunkenc.FromData(enc, chunkBytesCopy) if err != nil { - return csr, errors.Wrap(err, "chunk from data") + return csr, fmt.Errorf("chunk from data: %w", err) } csr.mc.chunk = chk @@ -1030,7 +1030,7 @@ func encodeTombstonesToSnapshotRecord(tr tombstones.Reader) ([]byte, error) { buf.PutByte(chunkSnapshotRecordTypeTombstones) b, err := tombstones.Encode(tr) if err != nil { - return nil, errors.Wrap(err, "encode tombstones") + return nil, fmt.Errorf("encode tombstones: %w", err) } buf.PutUvarintBytes(b) @@ -1045,7 +1045,10 @@ func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) { } tr, err := tombstones.Decode(dec.UvarintBytes()) - return tr, errors.Wrap(err, "decode tombstones") + if err != nil { + return tr, fmt.Errorf("decode tombstones: %w", err) + } + return tr, nil } const chunkSnapshotPrefix = "chunk_snapshot." @@ -1072,13 +1075,13 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { stats := &ChunkSnapshotStats{} wlast, woffset, err := h.wal.LastSegmentAndOffset() - if err != nil && err != record.ErrNotFound { - return stats, errors.Wrap(err, "get last wal segment and offset") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return stats, fmt.Errorf("get last wal segment and offset: %w", err) } _, cslast, csoffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) - if err != nil && err != record.ErrNotFound { - return stats, errors.Wrap(err, "find last chunk snapshot") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return stats, fmt.Errorf("find last chunk snapshot: %w", err) } if wlast == cslast && woffset == csoffset { @@ -1093,11 +1096,11 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { stats.Dir = cpdir if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { - return stats, errors.Wrap(err, "create chunk snapshot dir") + return stats, fmt.Errorf("create chunk snapshot dir: %w", err) } cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType()) if err != nil { - return stats, errors.Wrap(err, "open chunk snapshot") + return stats, fmt.Errorf("open chunk snapshot: %w", err) } // Ensures that an early return caused by an error doesn't leave any tmp files. @@ -1126,7 +1129,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if len(buf) > 10*1024*1024 { if err := cp.Log(recs...); err != nil { h.series.locks[i].RUnlock() - return stats, errors.Wrap(err, "flush records") + return stats, fmt.Errorf("flush records: %w", err) } buf, recs = buf[:0], recs[:0] } @@ -1139,16 +1142,16 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Add tombstones to the snapshot. 
tombstonesReader, err := h.Tombstones() if err != nil { - return stats, errors.Wrap(err, "get tombstones") + return stats, fmt.Errorf("get tombstones: %w", err) } rec, err := encodeTombstonesToSnapshotRecord(tombstonesReader) if err != nil { - return stats, errors.Wrap(err, "encode tombstones") + return stats, fmt.Errorf("encode tombstones: %w", err) } recs = append(recs, rec) // Flush remaining series records and tombstones. if err := cp.Log(recs...); err != nil { - return stats, errors.Wrap(err, "flush records") + return stats, fmt.Errorf("flush records: %w", err) } buf = buf[:0] @@ -1167,7 +1170,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { encbuf.PutByte(chunkSnapshotRecordTypeExemplars) enc.EncodeExemplarsIntoBuffer(batch, &encbuf) if err := cp.Log(encbuf.Get()); err != nil { - return errors.Wrap(err, "log exemplars") + return fmt.Errorf("log exemplars: %w", err) } buf, batch = buf[:0], batch[:0] return nil @@ -1175,7 +1178,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { err = h.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error { if len(batch) >= maxExemplarsPerRecord { if err := flushExemplars(); err != nil { - return errors.Wrap(err, "flush exemplars") + return fmt.Errorf("flush exemplars: %w", err) } } @@ -1193,19 +1196,19 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { return nil }) if err != nil { - return stats, errors.Wrap(err, "iterate exemplars") + return stats, fmt.Errorf("iterate exemplars: %w", err) } // Flush remaining exemplars. if err := flushExemplars(); err != nil { - return stats, errors.Wrap(err, "flush exemplars at the end") + return stats, fmt.Errorf("flush exemplars at the end: %w", err) } if err := cp.Close(); err != nil { - return stats, errors.Wrap(err, "close chunk snapshot") + return stats, fmt.Errorf("close chunk snapshot: %w", err) } if err := fileutil.Replace(cpdirtmp, cpdir); err != nil { - return stats, errors.Wrap(err, "rename chunk snapshot directory") + return stats, fmt.Errorf("rename chunk snapshot directory: %w", err) } if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, wlast, woffset); err != nil { @@ -1229,7 +1232,10 @@ func (h *Head) performChunkSnapshot() error { if err == nil { level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } - return errors.Wrap(err, "chunk snapshot") + if err != nil { + return fmt.Errorf("chunk snapshot: %w", err) + } + return nil } // ChunkSnapshotStats returns stats about a created chunk snapshot. 
@@ -1327,16 +1333,16 @@ func DeleteChunkSnapshots(dir string, maxIndex, maxOffset int) error { func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSeries, error) { dir, snapIdx, snapOffset, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil { - if err == record.ErrNotFound { + if errors.Is(err, record.ErrNotFound) { return snapIdx, snapOffset, nil, nil } - return snapIdx, snapOffset, nil, errors.Wrap(err, "find last chunk snapshot") + return snapIdx, snapOffset, nil, fmt.Errorf("find last chunk snapshot: %w", err) } start := time.Now() sr, err := wlog.NewSegmentsReader(dir) if err != nil { - return snapIdx, snapOffset, nil, errors.Wrap(err, "open chunk snapshot") + return snapIdx, snapOffset, nil, fmt.Errorf("open chunk snapshot: %w", err) } defer func() { if err := sr.Close(); err != nil { @@ -1424,7 +1430,7 @@ Outer: numSeries++ csr, err := decodeSeriesFromChunkSnapshot(&dec, rec) if err != nil { - loopErr = errors.Wrap(err, "decode series record") + loopErr = fmt.Errorf("decode series record: %w", err) break Outer } recordChan <- csr @@ -1432,7 +1438,7 @@ Outer: case chunkSnapshotRecordTypeTombstones: tr, err := decodeTombstonesSnapshotRecord(rec) if err != nil { - loopErr = errors.Wrap(err, "decode tombstones") + loopErr = fmt.Errorf("decode tombstones: %w", err) break Outer } @@ -1440,7 +1446,7 @@ Outer: h.tombstones.AddInterval(ref, ivs...) return nil }); err != nil { - loopErr = errors.Wrap(err, "iterate tombstones") + loopErr = fmt.Errorf("iterate tombstones: %w", err) break Outer } @@ -1468,7 +1474,7 @@ Outer: exemplarBuf = exemplarBuf[:0] exemplarBuf, err = dec.ExemplarsFromBuffer(&decbuf, exemplarBuf) if err != nil { - loopErr = errors.Wrap(err, "exemplars from buffer") + loopErr = fmt.Errorf("exemplars from buffer: %w", err) break Outer } @@ -1484,7 +1490,7 @@ Outer: Value: e.V, Ts: e.T, }); err != nil { - loopErr = errors.Wrap(err, "add exemplar") + loopErr = fmt.Errorf("add exemplar: %w", err) break Outer } } @@ -1502,16 +1508,19 @@ Outer: } close(errChan) - merr := tsdb_errors.NewMulti(errors.Wrap(loopErr, "decode loop")) + merr := tsdb_errors.NewMulti() + if loopErr != nil { + merr.Add(fmt.Errorf("decode loop: %w", loopErr)) + } for err := range errChan { - merr.Add(errors.Wrap(err, "record processing")) + merr.Add(fmt.Errorf("record processing: %w", err)) } if err := merr.Err(); err != nil { return -1, -1, nil, err } - if r.Err() != nil { - return -1, -1, nil, errors.Wrap(r.Err(), "read records") + if err := r.Err(); err != nil { + return -1, -1, nil, fmt.Errorf("read records: %w", err) } if len(refSeries) == 0 { diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 44ee66386..2b025a352 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -425,7 +425,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... // We add padding to 16 bytes to increase the addressable space we get through 4 byte // series references. if err := w.addPadding(16); err != nil { - return fmt.Errorf("failed to write padding bytes: %v", err) + return fmt.Errorf("failed to write padding bytes: %w", err) } if w.f.pos%16 != 0 { @@ -442,7 +442,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... 
if !ok { nameIndex, err = w.symbols.ReverseLookup(l.Name) if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + return fmt.Errorf("symbol entry for %q does not exist, %w", l.Name, err) } } w.labelNames[l.Name]++ @@ -452,7 +452,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok || cacheEntry.lastValue != l.Value { valueIndex, err = w.symbols.ReverseLookup(l.Value) if err != nil { - return fmt.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + return fmt.Errorf("symbol entry for %q does not exist, %w", l.Value, err) } w.symbolCache[l.Name] = symbolCacheEntry{ index: nameIndex, diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 6c5e313d4..369d33738 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -205,7 +205,7 @@ func TestIndexRW_Postings(t *testing.T) { err := ir.Series(p.At(), &builder, &c) require.NoError(t, err) - require.Equal(t, 0, len(c)) + require.Empty(t, c) require.Equal(t, series[i], builder.Labels()) } require.NoError(t, p.Err()) diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 04282c332..c20b4506f 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -977,7 +977,7 @@ func TestMemPostings_Delete(t *testing.T) { deleted := p.Get("lbl1", "b") expanded, err = ExpandPostings(deleted) require.NoError(t, err) - require.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded) + require.Empty(t, expanded, "expected empty postings, got %v", expanded) } func TestFindIntersectingPostings(t *testing.T) { diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 7ce51c795..674e1c052 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -33,7 +33,7 @@ func TestPostingsStats(t *testing.T) { stats.push(Stat{Name: "Stuff", Count: 3000000}) data := stats.get() - require.Equal(t, 10, len(data)) + require.Len(t, data, 10) for i := 0; i < heapLength; i++ { require.Equal(t, uint64(max-i), data[i].Count) } @@ -51,7 +51,7 @@ func TestPostingsStats2(t *testing.T) { data := stats.get() - require.Equal(t, 4, len(data)) + require.Len(t, data, 4) require.Equal(t, uint64(11), data[0].Count) } diff --git a/tsdb/mocks_test.go b/tsdb/mocks_test.go index 268017caa..d7c2b0a4f 100644 --- a/tsdb/mocks_test.go +++ b/tsdb/mocks_test.go @@ -14,7 +14,7 @@ package tsdb import ( - "github.com/pkg/errors" + "fmt" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -41,7 +41,7 @@ func (m *mockIndexWriter) AddSeries(_ storage.SeriesRef, l labels.Labels, chks . 
for i, chk := range chks { c, err := copyChunk(chk.Chunk) if err != nil { - return errors.Wrap(err, "mockIndexWriter: copy chunk") + return fmt.Errorf("mockIndexWriter: copy chunk: %w", err) } chksNew[i] = chunks.Meta{MaxTime: chk.MaxTime, MinTime: chk.MinTime, Chunk: c} } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 64577872f..b50a268b7 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -491,7 +491,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) { }) require.Nil(t, iterable) require.Equal(t, err, fmt.Errorf("not found")) - require.Equal(t, c, nil) + require.Nil(t, c) }) tests := []struct { diff --git a/tsdb/querier.go b/tsdb/querier.go index 6584d7da0..f88e4415e 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -15,13 +15,13 @@ package tsdb import ( "context" + "errors" "fmt" "math" "strings" "unicode/utf8" "github.com/oklog/ulid" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/histogram" @@ -63,18 +63,18 @@ type blockBaseQuerier struct { func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, error) { indexr, err := b.Index() if err != nil { - return nil, errors.Wrap(err, "open index reader") + return nil, fmt.Errorf("open index reader: %w", err) } chunkr, err := b.Chunks() if err != nil { indexr.Close() - return nil, errors.Wrap(err, "open chunk reader") + return nil, fmt.Errorf("open chunk reader: %w", err) } tombsr, err := b.Tombstones() if err != nil { indexr.Close() chunkr.Close() - return nil, errors.Wrap(err, "open tombstone reader") + return nil, fmt.Errorf("open tombstone reader: %w", err) } if tombsr == nil { @@ -442,12 +442,12 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { p, err := PostingsForMatchers(ctx, r, matchers...) if err != nil { - return nil, errors.Wrap(err, "fetching postings for matchers") + return nil, fmt.Errorf("fetching postings for matchers: %w", err) } allValues, err := r.LabelValues(ctx, name) if err != nil { - return nil, errors.Wrapf(err, "fetching values of label %s", name) + return nil, fmt.Errorf("fetching values of label %s: %w", name, err) } // If we have a matcher for the label name, we can filter out values that don't match @@ -473,12 +473,12 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma for i, value := range allValues { valuesPostings[i], err = r.Postings(ctx, name, value) if err != nil { - return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value) + return nil, fmt.Errorf("fetching postings for %s=%q: %w", name, value, err) } } indexes, err := index.FindIntersectingPostings(p, valuesPostings) if err != nil { - return nil, errors.Wrap(err, "intersecting postings") + return nil, fmt.Errorf("intersecting postings: %w", err) } values := make([]string, 0, len(indexes)) @@ -499,8 +499,8 @@ func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*lab for p.Next() { postings = append(postings, p.At()) } - if p.Err() != nil { - return nil, errors.Wrapf(p.Err(), "postings for label names with matchers") + if err := p.Err(); err != nil { + return nil, fmt.Errorf("postings for label names with matchers: %w", err) } return r.LabelNamesFor(ctx, postings...) 
@@ -539,10 +539,10 @@ func (b *blockBaseSeriesSet) Next() bool { for b.p.Next() { if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil { // Postings may be stale. Skip if no underlying series exists. - if errors.Cause(err) == storage.ErrNotFound { + if errors.Is(err, storage.ErrNotFound) { continue } - b.err = errors.Wrapf(err, "get series %d", b.p.At()) + b.err = fmt.Errorf("get series %d: %w", b.p.At(), err) return false } @@ -552,7 +552,7 @@ func (b *blockBaseSeriesSet) Next() bool { intervals, err := b.tombstones.Get(b.p.At()) if err != nil { - b.err = errors.Wrap(err, "get tombstones") + b.err = fmt.Errorf("get tombstones: %w", err) return false } @@ -702,7 +702,7 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { } if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currMeta.Ref, p.blockID.String()) + p.err = fmt.Errorf("cannot populate chunk %d from block %s: %w", p.currMeta.Ref, p.blockID.String(), p.err) return false } @@ -900,7 +900,7 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { valueType := p.currDelIter.Next() if valueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) } return false } @@ -968,11 +968,11 @@ func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { } if err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) return false } if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "iterate chunk while re-encoding") + p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err) return false } @@ -991,7 +991,7 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { firstValueType := p.currDelIter.Next() if firstValueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: no samples could be read") + p.err = fmt.Errorf("populateChunksFromIterable: no samples could be read: %w", err) return false } return false @@ -1075,11 +1075,11 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { } if err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: error when writing new chunks") + p.err = fmt.Errorf("populateChunksFromIterable: error when writing new chunks: %w", err) return false } if err = p.currDelIter.Err(); err != nil { - p.err = errors.Wrap(err, "populateChunksFromIterable: currDelIter error when writing new chunks") + p.err = fmt.Errorf("populateChunksFromIterable: currDelIter error when writing new chunks: %w", err) return false } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 6307587f5..18d81b85b 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -15,6 +15,7 @@ package tsdb import ( "context" + "errors" "fmt" "math" "math/rand" @@ -26,7 +27,6 @@ import ( "time" "github.com/oklog/ulid" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" @@ -213,7 +213,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C require.Equal(t, eok, rok) if !eok { - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) break } sexp := c.exp.At() @@ -248,7 +248,7 @@ func testBlockQuerier(t *testing.T, c 
blockQuerierTestCase, ir IndexReader, cr C require.Equal(t, eok, rok) if !eok { - require.Equal(t, 0, len(res.Warnings())) + require.Empty(t, res.Warnings()) break } sexpChks := c.expChks.At() @@ -2068,7 +2068,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) { i++ } require.NoError(b, ms.Err()) - require.Equal(b, len(lbls), i) + require.Len(b, lbls, i) } }) } @@ -2317,7 +2317,7 @@ func (m mockIndex) Postings(ctx context.Context, name string, values ...string) func (m mockIndex) SortedPostings(p index.Postings) index.Postings { ep, err := index.ExpandPostings(p) if err != nil { - return index.ErrPostings(errors.Wrap(err, "expand postings")) + return index.ErrPostings(fmt.Errorf("expand postings: %w", err)) } sort.Slice(ep, func(i, j int) bool { @@ -2503,7 +2503,7 @@ func BenchmarkQuerySeek(b *testing.B) { require.NoError(b, it.Err()) } require.NoError(b, ss.Err()) - require.Equal(b, 0, len(ss.Warnings())) + require.Empty(b, ss.Warnings()) }) } } @@ -2631,7 +2631,7 @@ func BenchmarkSetMatcher(b *testing.B) { for ss.Next() { } require.NoError(b, ss.Err()) - require.Equal(b, 0, len(ss.Warnings())) + require.Empty(b, ss.Warnings()) } }) } @@ -3233,7 +3233,7 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors la actualExpansions++ } require.NoError(b, ss.Err()) - require.Equal(b, 0, len(ss.Warnings())) + require.Empty(b, ss.Warnings()) require.Equal(b, expExpansions, actualExpansions) require.NoError(b, ss.Err()) } @@ -3415,7 +3415,7 @@ func TestBlockBaseSeriesSet(t *testing.T) { i++ } - require.Equal(t, len(tc.expIdxs), i) + require.Len(t, tc.expIdxs, i) require.NoError(t, bcs.Err()) } } @@ -3654,7 +3654,7 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) { chk := it.At() cit := chk.Chunk.Iterator(nil) for vt := cit.Next(); vt != chunkenc.ValNone; vt = cit.Next() { - require.Equal(t, vt, chunkenc.ValFloatHistogram, "Only float histograms expected, other sample types should have been deleted.") + require.Equal(t, chunkenc.ValFloatHistogram, vt, "Only float histograms expected, other sample types should have been deleted.") sampleCount++ } } diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 9111350a7..57599ef6d 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -15,7 +15,6 @@ package record import ( - "errors" "math/rand" "testing" @@ -159,7 +158,7 @@ func TestRecord_EncodeDecode(t *testing.T) { floatHistograms[i] = RefFloatHistogramSample{ Ref: h.Ref, T: h.T, - FH: h.H.ToFloat(), + FH: h.H.ToFloat(nil), } } decFloatHistograms, err := dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil) @@ -209,7 +208,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Samples(samples, nil)[:8] _, err := dec.Samples(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) t.Run("Test corrupted tombstone record", func(t *testing.T) { @@ -232,7 +231,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Exemplars(exemplars, nil)[:8] _, err := dec.Exemplars(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) t.Run("Test corrupted metadata record", func(t *testing.T) { @@ -242,7 +241,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.Metadata(meta, nil)[:8] _, err := dec.Metadata(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) t.Run("Test 
corrupted histogram record", func(t *testing.T) { @@ -267,7 +266,7 @@ func TestRecord_Corrupted(t *testing.T) { corrupted := enc.HistogramSamples(histograms, nil)[:8] _, err := dec.HistogramSamples(corrupted, nil) - require.True(t, errors.Is(err, encoding.ErrInvalidSize)) + require.ErrorIs(t, err, encoding.ErrInvalidSize) }) } diff --git a/tsdb/repair.go b/tsdb/repair.go index 081116454..9d2c5738d 100644 --- a/tsdb/repair.go +++ b/tsdb/repair.go @@ -22,7 +22,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -35,7 +34,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { // We must actually set the index file version to 2 and revert the meta.json version back to 1. dirs, err := blockDirs(dir) if err != nil { - return errors.Wrapf(err, "list block dirs in %q", dir) + return fmt.Errorf("list block dirs in %q: %w", dir, err) } tmpFiles := make([]string, 0, len(dirs)) @@ -71,44 +70,54 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { repl, err := os.Create(filepath.Join(d, "index.repaired")) if err != nil { - return errors.Wrapf(err, "create index.repaired for block dir: %v", d) + return fmt.Errorf("create index.repaired for block dir: %v: %w", d, err) } tmpFiles = append(tmpFiles, repl.Name()) broken, err := os.Open(filepath.Join(d, indexFilename)) if err != nil { - return errors.Wrapf(err, "open broken index for block dir: %v", d) + return fmt.Errorf("open broken index for block dir: %v: %w", d, err) } if _, err := io.Copy(repl, broken); err != nil { - return errors.Wrapf(err, "copy content of index to index.repaired for block dir: %v", d) + return fmt.Errorf("copy content of index to index.repaired for block dir: %v: %w", d, err) } // Set the 5th byte to 2 to indicate the correct file format version. if _, err := repl.WriteAt([]byte{2}, 4); err != nil { - return tsdb_errors.NewMulti( - errors.Wrapf(err, "rewrite of index.repaired for block dir: %v", d), - errors.Wrap(repl.Close(), "close"), - ).Err() + errs := tsdb_errors.NewMulti( + fmt.Errorf("rewrite of index.repaired for block dir: %v: %w", d, err)) + if err := repl.Close(); err != nil { + errs.Add(fmt.Errorf("close: %w", err)) + } + return errs.Err() } if err := repl.Sync(); err != nil { - return tsdb_errors.NewMulti( - errors.Wrapf(err, "sync of index.repaired for block dir: %v", d), - errors.Wrap(repl.Close(), "close"), - ).Err() + errs := tsdb_errors.NewMulti( + fmt.Errorf("sync of index.repaired for block dir: %v: %w", d, err)) + if err := repl.Close(); err != nil { + errs.Add(fmt.Errorf("close: %w", err)) + } + return errs.Err() } if err := repl.Close(); err != nil { - return errors.Wrapf(repl.Close(), "close repaired index for block dir: %v", d) + return fmt.Errorf("close repaired index for block dir: %v: %w", d, err) } if err := broken.Close(); err != nil { - return errors.Wrapf(repl.Close(), "close broken index for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("close broken index for block dir: %v: %w", d, err) + } } if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil { - return errors.Wrapf(repl.Close(), "replaced broken index with index.repaired for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("replaced broken index with index.repaired for block dir: %v: %w", d, err) + } } // Reset version of meta.json to 1. 
meta.Version = metaVersion1 if _, err := writeMetaFile(logger, d, meta); err != nil { - return errors.Wrapf(repl.Close(), "write meta for block dir: %v", d) + if err := repl.Close(); err != nil { + return fmt.Errorf("write meta for block dir: %v: %w", d, err) + } } } return nil diff --git a/tsdb/wal.go b/tsdb/wal.go index bc7db35bf..1509c9cd9 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -16,6 +16,7 @@ package tsdb import ( "bufio" "encoding/binary" + "errors" "fmt" "hash" "hash/crc32" @@ -28,7 +29,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/storage" @@ -210,7 +210,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, for _, fn := range fns[i:] { if err := os.Remove(fn); err != nil { - return w, errors.Wrap(err, "removing segment failed") + return w, fmt.Errorf("removing segment failed: %w", err) } } break @@ -237,8 +237,8 @@ func (r *repairingWALReader) Read( if err == nil { return nil } - cerr, ok := errors.Cause(err).(walCorruptionErr) - if !ok { + var cerr *walCorruptionErr + if !errors.As(err, &cerr) { return err } r.wal.metrics.corruptions.Inc() @@ -309,7 +309,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) // Past WAL files are closed. We have to reopen them for another read. f, err := w.openSegmentFile(sf.Name()) if err != nil { - return errors.Wrap(err, "open old WAL segment for read") + return fmt.Errorf("open old WAL segment for read: %w", err) } candidates = append(candidates, &segmentFile{ File: f, @@ -326,7 +326,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) // Create a new tmp file. f, err := w.createSegmentFile(filepath.Join(w.dirFile.Name(), "compact.tmp")) if err != nil { - return errors.Wrap(err, "create compaction segment") + return fmt.Errorf("create compaction segment: %w", err) } defer func() { if err := os.RemoveAll(f.Name()); err != nil { @@ -352,7 +352,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) err := r.decodeSeries(flag, byt, &decSeries) if err != nil { - return errors.Wrap(err, "decode samples while truncating") + return fmt.Errorf("decode samples while truncating: %w", err) } for _, s := range decSeries { if keep(s.Ref) { @@ -367,11 +367,11 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "write to compaction segment") + return fmt.Errorf("write to compaction segment: %w", err) } } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read candidate WAL files") + if err := r.Err(); err != nil { + return fmt.Errorf("read candidate WAL files: %w", err) } off, err := csf.Seek(0, io.SeekCurrent) @@ -390,12 +390,12 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(chunks.HeadSeriesRef) bool) _ = candidates[0].Close() // need close before remove on platform windows if err := fileutil.Replace(csf.Name(), candidates[0].Name()); err != nil { - return errors.Wrap(err, "rename compaction segment") + return fmt.Errorf("rename compaction segment: %w", err) } for _, f := range candidates[1:] { f.Close() // need close before remove on platform windows if err := os.RemoveAll(f.Name()); err != nil { - return errors.Wrap(err, "delete WAL segment file") + return fmt.Errorf("delete WAL segment file: %w", err) } } if err := w.dirFile.Sync(); err != nil { @@ -435,7 +435,7 @@ func (w *SegmentWAL) LogSeries(series 
[]record.RefSeries) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -462,7 +462,7 @@ func (w *SegmentWAL) LogSamples(samples []record.RefSample) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -488,7 +488,7 @@ func (w *SegmentWAL) LogDeletes(stones []tombstones.Stone) error { w.putBuffer(buf) if err != nil { - return errors.Wrap(err, "log series") + return fmt.Errorf("log series: %w", err) } tf := w.head() @@ -523,7 +523,7 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { switch n, err := f.Read(metab); { case err != nil: - return nil, errors.Wrapf(err, "validate meta %q", f.Name()) + return nil, fmt.Errorf("validate meta %q: %w", f.Name(), err) case n != 8: return nil, fmt.Errorf("invalid header size %d in %q", n, f.Name()) } @@ -573,16 +573,16 @@ func (w *SegmentWAL) cut() error { w.actorc <- func() error { off, err := hf.Seek(0, io.SeekCurrent) if err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Truncate(off); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Sync(); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } if err := hf.Close(); err != nil { - return errors.Wrapf(err, "finish old segment %s", hf.Name()) + return fmt.Errorf("finish old segment %s: %w", hf.Name(), err) } return nil } @@ -600,7 +600,10 @@ func (w *SegmentWAL) cut() error { go func() { w.actorc <- func() error { - return errors.Wrap(w.dirFile.Sync(), "sync WAL directory") + if err := w.dirFile.Sync(); err != nil { + return fmt.Errorf("sync WAL directory: %w", err) + } + return nil } }() @@ -635,7 +638,7 @@ func (w *SegmentWAL) Sync() error { head = w.head() }() if err != nil { - return errors.Wrap(err, "flush buffer") + return fmt.Errorf("flush buffer: %w", err) } if head != nil { // But only fsync the head segment after releasing the mutex as it will block on disk I/O. @@ -726,11 +729,13 @@ func (w *SegmentWAL) Close() error { // only the current segment will still be open. 
if hf := w.head(); hf != nil { if err := hf.Close(); err != nil { - return errors.Wrapf(err, "closing WAL head %s", hf.Name()) + return fmt.Errorf("closing WAL head %s: %w", hf.Name(), err) } } - - return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name()) + if err := w.dirFile.Close(); err != nil { + return fmt.Errorf("closing WAL dir %s: %w", w.dirFile.Name(), err) + } + return nil } func (w *SegmentWAL) write(t WALEntryType, flag uint8, buf []byte) error { @@ -921,7 +926,7 @@ func (r *walReader) Read( err = r.decodeSeries(flag, b, &series) if err != nil { - err = errors.Wrap(err, "decode series entry") + err = fmt.Errorf("decode series entry: %w", err) break } datac <- series @@ -940,7 +945,7 @@ func (r *walReader) Read( err = r.decodeSamples(flag, b, &samples) if err != nil { - err = errors.Wrap(err, "decode samples entry") + err = fmt.Errorf("decode samples entry: %w", err) break } datac <- samples @@ -960,7 +965,7 @@ func (r *walReader) Read( err = r.decodeDeletes(flag, b, &deletes) if err != nil { - err = errors.Wrap(err, "decode delete entry") + err = fmt.Errorf("decode delete entry: %w", err) break } datac <- deletes @@ -982,8 +987,8 @@ func (r *walReader) Read( if err != nil { return err } - if r.Err() != nil { - return errors.Wrap(r.Err(), "read entry") + if err := r.Err(); err != nil { + return fmt.Errorf("read entry: %w", err) } return nil } @@ -1046,12 +1051,16 @@ type walCorruptionErr struct { lastOffset int64 } -func (e walCorruptionErr) Error() string { +func (e *walCorruptionErr) Error() string { return fmt.Sprintf("%s <file: %s, lastOffset: %d>", e.err, e.file, e.lastOffset) } +func (e *walCorruptionErr) Unwrap() error { + return e.err +} + func (r *walReader) corruptionErr(s string, args ...interface{}) error { - return walCorruptionErr{ + return &walCorruptionErr{ err: fmt.Errorf(s, args...), file: r.cur, lastOffset: r.lastOffset, @@ -1152,8 +1161,8 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) }) } - if dec.Err() != nil { - return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res)) + if err := dec.Err(); err != nil { + return fmt.Errorf("decode error after %d samples: %w", len(*res), err) } if len(dec.B) > 0 { return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } @@ -1185,7 +1194,7 @@ func deprecatedWALExists(logger log.Logger, dir string) (bool, error) { // Detect whether we still have the old WAL. fns, err := sequenceFiles(dir) if err != nil && !os.IsNotExist(err) { - return false, errors.Wrap(err, "list sequence files") + return false, fmt.Errorf("list sequence files: %w", err) } if len(fns) == 0 { return false, nil // No WAL at all yet. } @@ -1194,13 +1203,13 @@ func deprecatedWALExists(logger log.Logger, dir string) (bool, error) { // old WAL. f, err := os.Open(fns[0]) if err != nil { - return false, errors.Wrap(err, "check first existing segment") + return false, fmt.Errorf("check first existing segment: %w", err) } defer f.Close() var hdr [4]byte - if _, err := f.Read(hdr[:]); err != nil && err != io.EOF { - return false, errors.Wrap(err, "read header from first segment") + if _, err := f.Read(hdr[:]); err != nil && !errors.Is(err, io.EOF) { + return false, fmt.Errorf("read header from first segment: %w", err) } // If we cannot read the magic header for segments of the old WAL, abort.
// Either it's migrated already or there's a corruption issue with which @@ -1223,11 +1232,11 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { tmpdir := dir + ".tmp" if err := os.RemoveAll(tmpdir); err != nil { - return errors.Wrap(err, "cleanup replacement dir") + return fmt.Errorf("cleanup replacement dir: %w", err) } repl, err := wlog.New(logger, nil, tmpdir, wlog.CompressionNone) if err != nil { - return errors.Wrap(err, "open new WAL") + return fmt.Errorf("open new WAL: %w", err) } // It should've already been closed as part of the previous finalization. @@ -1240,7 +1249,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { w, err := OpenSegmentWAL(dir, logger, time.Minute, nil) if err != nil { - return errors.Wrap(err, "open old WAL") + return fmt.Errorf("open old WAL: %w", err) } defer w.Close() @@ -1271,22 +1280,22 @@ func MigrateWAL(logger log.Logger, dir string) (err error) { }, ) if decErr != nil { - return errors.Wrap(err, "decode old entries") + return fmt.Errorf("decode old entries: %w", decErr) } if err != nil { - return errors.Wrap(err, "write new entries") + return fmt.Errorf("write new entries: %w", err) } // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := w.Close(); err != nil { - return errors.Wrap(err, "close old WAL") + return fmt.Errorf("close old WAL: %w", err) } if err := repl.Close(); err != nil { - return errors.Wrap(err, "close new WAL") + return fmt.Errorf("close new WAL: %w", err) } if err := fileutil.Replace(tmpdir, dir); err != nil { - return errors.Wrap(err, "replace old WAL") + return fmt.Errorf("replace old WAL: %w", err) } return nil } diff --git a/tsdb/wal_test.go b/tsdb/wal_test.go index 5b2911131..964763d7f 100644 --- a/tsdb/wal_test.go +++ b/tsdb/wal_test.go @@ -49,7 +49,7 @@ func TestSegmentWAL_cut(t *testing.T) { require.NoError(t, w.cut()) // Cutting creates a new file. - require.Equal(t, 2, len(w.files)) + require.Len(t, w.files, 2) require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!"))) @@ -409,7 +409,7 @@ func TestWALRestoreCorrupted(t *testing.T) { r := w2.Reader() serf := func(l []record.RefSeries) { - require.Equal(t, 0, len(l)) + require.Empty(t, l) } // Weird hack to check order of reads. diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go index 381e09186..bc5b4f20f 100644 --- a/tsdb/wlog/checkpoint_test.go +++ b/tsdb/wlog/checkpoint_test.go @@ -229,7 +229,7 @@ func TestCheckpoint(t *testing.T) { // Only the new checkpoint should be left. files, err := os.ReadDir(dir) require.NoError(t, err) - require.Equal(t, 1, len(files)) + require.Len(t, files, 1) require.Equal(t, "checkpoint.00000106", files[0].Name()) sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106")) diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 9d2ec50d2..0f510e0c1 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -344,7 +344,7 @@ func TestReaderFuzz(t *testing.T) { r := reader.Record() // Expected value may come as nil or empty slice, so it requires special comparison. if len(expected) == 0 { - require.Len(t, r, 0) + require.Empty(t, r) } else { require.Equal(t, expected, r, "read wrong record") } @@ -395,7 +395,7 @@ func TestReaderFuzz_Live(t *testing.T) { require.True(t, ok, "unexpected record") // Expected value may come as nil or empty slice, so it requires special comparison.
if len(expected) == 0 { - require.Len(t, rec, 0) + require.Empty(t, rec) } else { require.Equal(t, expected, rec, "record does not match expected") } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index fc665b57d..b30dce91a 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -199,7 +199,7 @@ func TestTailSamples(t *testing.T) { floatHistogram := enc.FloatHistogramSamples([]record.RefFloatHistogramSample{{ Ref: chunks.HeadSeriesRef(inner), T: now.UnixNano() + 1, - FH: hist.ToFloat(), + FH: hist.ToFloat(nil), }}, nil) require.NoError(t, w.Log(floatHistogram)) } diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index c3ae001d9..fdea75694 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -116,6 +116,10 @@ func (e *CorruptionErr) Error() string { return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err) } +func (e *CorruptionErr) Unwrap() error { + return e.Err +} + // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index eb7fb8a54..8f4533e0e 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -189,7 +189,7 @@ func TestWALRepair_ReadingError(t *testing.T) { result = append(result, append(b, r.Record()...)) } require.NoError(t, r.Err()) - require.Equal(t, test.intactRecs, len(result), "Wrong number of intact records") + require.Len(t, result, test.intactRecs, "Wrong number of intact records") for i, r := range result { if !bytes.Equal(records[i], r) { @@ -283,7 +283,7 @@ func TestCorruptAndCarryOn(t *testing.T) { reader := NewReader(sr) i := 0 for ; i < 4 && reader.Next(); i++ { - require.Equal(t, recordSize, len(reader.Record())) + require.Len(t, reader.Record(), recordSize) } require.Equal(t, 4, i, "not enough records") require.False(t, reader.Next(), "unexpected record") @@ -301,8 +301,8 @@ func TestCorruptAndCarryOn(t *testing.T) { require.NoError(t, err) // Ensure that we have a completely clean slate after repairing. - require.Equal(t, w.segment.Index(), 1) // We corrupted segment 0. - require.Equal(t, w.donePages, 0) + require.Equal(t, 1, w.segment.Index()) // We corrupted segment 0. 
+ require.Equal(t, 0, w.donePages) for i := 0; i < 5; i++ { buf := make([]byte, recordSize) @@ -325,11 +325,11 @@ func TestCorruptAndCarryOn(t *testing.T) { reader := NewReader(sr) i := 0 for ; i < 9 && reader.Next(); i++ { - require.Equal(t, recordSize, len(reader.Record())) + require.Len(t, reader.Record(), recordSize) } require.Equal(t, 9, i, "wrong number of records") require.False(t, reader.Next(), "unexpected record") - require.Equal(t, nil, reader.Err()) + require.NoError(t, reader.Err()) sr.Close() } } @@ -456,7 +456,7 @@ func TestLogPartialWrite(t *testing.T) { for i := 1; i <= testData.numRecords; i++ { if err := w.Log(record); i == testData.faultyRecord { - require.Error(t, io.ErrShortWrite, err) + require.ErrorIs(t, err, io.ErrShortWrite) } else { require.NoError(t, err) } } diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go index 0f052f5e7..c592630fe 100644 --- a/util/fmtutil/format_test.go +++ b/util/fmtutil/format_test.go @@ -204,7 +204,7 @@ func TestParseAndPushMetricsTextAndFormat(t *testing.T) { expected, err := MetricTextToWriteRequest(input, labels) require.NoError(t, err) - require.Equal(t, writeRequestFixture, expected) + require.Equal(t, expected, writeRequestFixture) } func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) { @@ -217,7 +217,7 @@ func TestMetricTextToWriteRequestErrorParsingFloatValue(t *testing.T) { labels := map[string]string{"job": "promtool"} _, err := MetricTextToWriteRequest(input, labels) - require.Equal(t, err.Error(), "text format parsing error in line 4: expected float as value, got \"1027Error\"") + require.Equal(t, "text format parsing error in line 4: expected float as value, got \"1027Error\"", err.Error()) } func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) { @@ -229,5 +229,5 @@ func TestMetricTextToWriteRequestErrorParsingMetricType(t *testing.T) { labels := map[string]string{"job": "promtool"} _, err := MetricTextToWriteRequest(input, labels) - require.Equal(t, err.Error(), "text format parsing error in line 3: unknown metric type \"info\"") + require.Equal(t, "text format parsing error in line 3: unknown metric type \"info\"", err.Error()) } diff --git a/util/runutil/runutil.go b/util/runutil/runutil.go new file mode 100644 index 000000000..5a77c332b --- /dev/null +++ b/util/runutil/runutil.go @@ -0,0 +1,37 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copied from https://github.com/efficientgo/core/blob/a21078e2c723b69e05f95c65dbc5058712b4edd8/runutil/runutil.go#L39 +// and adjusted. + +package runutil + +import "time" + +// Retry executes f every interval until stopc is closed or f returns no error.
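+//
+// A usage sketch, not part of this change: checkReady stands in for any
+// caller-supplied func() error; the interval and channel are illustrative.
+//
+//	stopc := make(chan struct{})
+//	// Poll every 100ms until checkReady returns nil or stopc is closed.
+//	err := Retry(100*time.Millisecond, stopc, func() error {
+//		return checkReady()
+//	})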
+func Retry(interval time.Duration, stopc <-chan struct{}, f func() error) error { + tick := time.NewTicker(interval) + defer tick.Stop() + + var err error + for { + if err = f(); err == nil { + return nil + } + select { + case <-stopc: + return err + case <-tick.C: + } + } +} diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go index 638a03588..fea820022 100644 --- a/util/zeropool/pool_test.go +++ b/util/zeropool/pool_test.go @@ -28,19 +28,19 @@ func TestPool(t *testing.T) { t.Run("provides correct values", func(t *testing.T) { pool := zeropool.New(func() []byte { return make([]byte, 1024) }) item1 := pool.Get() - require.Equal(t, 1024, len(item1)) + require.Len(t, item1, 1024) item2 := pool.Get() - require.Equal(t, 1024, len(item2)) + require.Len(t, item2, 1024) pool.Put(item1) pool.Put(item2) item1 = pool.Get() - require.Equal(t, 1024, len(item1)) + require.Len(t, item1, 1024) item2 = pool.Get() - require.Equal(t, 1024, len(item2)) + require.Len(t, item2, 1024) }) t.Run("is not racy", func(t *testing.T) { diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index a5dd8640b..d4da05e46 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -872,7 +872,7 @@ func TestStats(t *testing.T) { name: "stats is blank", param: "", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.Nil(t, qd.Stats) }, @@ -881,7 +881,7 @@ func TestStats(t *testing.T) { name: "stats is true", param: "true", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() @@ -896,7 +896,7 @@ func TestStats(t *testing.T) { name: "stats is all", param: "all", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.NotNil(t, qd.Stats) qs := qd.Stats.Builtin() @@ -917,12 +917,12 @@ func TestStats(t *testing.T) { }, param: "known", expected: func(t *testing.T, i interface{}) { - require.IsType(t, i, &QueryData{}) + require.IsType(t, &QueryData{}, i) qd := i.(*QueryData) require.NotNil(t, qd.Stats) j, err := json.Marshal(qd.Stats) require.NoError(t, err) - require.JSONEq(t, string(j), `{"custom":"Custom Value"}`) + require.JSONEq(t, `{"custom":"Custom Value"}`, string(j)) }, }, } { diff --git a/web/federate.go b/web/federate.go index 2b79d0053..2e7bac21d 100644 --- a/web/federate.go +++ b/web/federate.go @@ -138,7 +138,7 @@ Loop: case chunkenc.ValFloat: f = sample.F() case chunkenc.ValHistogram: - fh = sample.H().ToFloat() + fh = sample.H().ToFloat(nil) case chunkenc.ValFloatHistogram: fh = sample.FH() default: diff --git a/web/federate_test.go b/web/federate_test.go index 80539861d..94783a739 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -354,7 +354,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { _, err = app.AppendHistogram(0, l, 100*60*1000, histWithoutZeroBucket.Copy(), nil) expVec = append(expVec, promql.Sample{ T: 100 * 60 * 1000, - H: histWithoutZeroBucket.ToFloat(), + H: histWithoutZeroBucket.ToFloat(nil), Metric: expL, }) default: @@ -363,7 +363,7 @@ func TestFederationWithNativeHistograms(t *testing.T) { _, err = app.AppendHistogram(0, l, 100*60*1000, hist.Copy(), nil) expVec = append(expVec, promql.Sample{ T: 100 * 60 * 1000, - H: hist.ToFloat(), + H: hist.ToFloat(nil), Metric: expL, }) } diff --git 
a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 4260f3d26..20777a38a 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,16 +33,16 @@ "lru-cache": "^7.18.3" }, "devDependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.1" + "nock": "^13.4.0" }, "peerDependencies": { "@codemirror/autocomplete": "^6.4.0", diff --git a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts index 77a87c8cc..963fc95f2 100644 --- a/web/ui/module/codemirror-promql/src/complete/promql.terms.ts +++ b/web/ui/module/codemirror-promql/src/complete/promql.terms.ts @@ -317,6 +317,12 @@ export const functionIdentifierTerms = [ info: 'Calculate base-2 logarithm of input series', type: 'function', }, + { + label: 'mad_over_time', + detail: 'function', + info: 'Return the median absolute deviation over time for input series', + type: 'function', + }, { label: 'max_over_time', detail: 'function', diff --git a/web/ui/module/codemirror-promql/src/parser/parser.test.ts b/web/ui/module/codemirror-promql/src/parser/parser.test.ts index 5ef9c1f90..78195a5c6 100644 --- a/web/ui/module/codemirror-promql/src/parser/parser.test.ts +++ b/web/ui/module/codemirror-promql/src/parser/parser.test.ts @@ -95,6 +95,11 @@ describe('promql operations', () => { expectedValueType: ValueType.vector, expectedDiag: [] as Diagnostic[], }, + { + expr: 'mad_over_time(rate(metric_name[5m])[1h:] offset 1m)', + expectedValueType: ValueType.vector, + expectedDiag: [] as Diagnostic[], + }, { expr: 'max_over_time(rate(metric_name[5m])[1h:] offset 1m)', expectedValueType: ValueType.vector, diff --git a/web/ui/module/codemirror-promql/src/types/function.ts b/web/ui/module/codemirror-promql/src/types/function.ts index cceeef90b..369478158 100644 --- a/web/ui/module/codemirror-promql/src/types/function.ts +++ b/web/ui/module/codemirror-promql/src/types/function.ts @@ -56,6 +56,7 @@ import { Ln, Log10, Log2, + MadOverTime, MaxOverTime, MinOverTime, Minute, @@ -370,6 +371,12 @@ const promqlFunctions: { [key: number]: PromQLFunction } = { variadic: 0, returnType: ValueType.vector, }, + [MadOverTime]: { + name: 'mad_over_time', + argTypes: [ValueType.matrix], + variadic: 0, + returnType: ValueType.vector, + }, [MaxOverTime]: { name: 'max_over_time', argTypes: [ValueType.matrix], diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index d6783b0f8..0792682b1 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -30,9 +30,9 @@ "test": "NODE_OPTIONS=--experimental-vm-modules jest" }, "devDependencies": { - "@lezer/generator": "^1.2.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6" + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", diff --git 
a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar index 5280ea800..ab627c829 100644 --- a/web/ui/module/lezer-promql/src/promql.grammar +++ b/web/ui/module/lezer-promql/src/promql.grammar @@ -149,6 +149,7 @@ FunctionIdentifier { Ln | Log10 | Log2 | + MadOverTime | MaxOverTime | MinOverTime | Minute | @@ -380,6 +381,7 @@ NumberLiteral { Ln { condFn<"ln"> } Log10 { condFn<"log10"> } Log2 { condFn<"log2"> } + MadOverTime { condFn<"mad_over_time"> } MaxOverTime { condFn<"max_over_time"> } MinOverTime { condFn<"min_over_time"> } Minute { condFn<"minute"> } diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index ce70ee990..51dd60749 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -12,16 +12,16 @@ "module/*" ], "devDependencies": { - "@types/jest": "^29.5.2", - "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.8.0", + "@types/jest": "^29.5.11", + "@types/node": "^20.10.4", + "eslint-config-prettier": "^8.10.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.5.1", + "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.1.0", + "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, "engines": { @@ -37,16 +37,16 @@ "lru-cache": "^7.18.3" }, "devDependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "isomorphic-fetch": "^3.0.0", - "nock": "^13.3.1" + "nock": "^13.4.0" }, "engines": { "node": ">=12.0.0" @@ -73,9 +73,9 @@ "version": "0.48.0", "license": "Apache-2.0", "devDependencies": { - "@lezer/generator": "^1.2.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6" + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14" }, "peerDependencies": { "@lezer/highlight": "^1.1.2", @@ -96,17 +96,89 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": 
"sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/compat-data": { "version": "7.19.3", "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.19.3.tgz", @@ -174,13 +246,14 @@ } }, "node_modules/@babel/generator": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz", - "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.3.tgz", + "integrity": "sha512-keeZWAV4LU3tW0qRi19HRpabC/ilM0HRBBzf9/k8FFiG4KVpiv0FIy4hHfLfFQZNhziCTPTmd59zoyv6DNISzg==", "dev": true, "dependencies": { - "@babel/types": "^7.19.3", + "@babel/types": "^7.23.3", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "engines": { @@ -299,9 +372,9 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true, "engines": { "node": ">=6.9.0" @@ -320,25 +393,25 @@ } }, "node_modules/@babel/helper-function-name": 
{ - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "dependencies": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -467,30 +540,30 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "dev": true, "engines": { "node": ">=6.9.0" @@ -535,13 +608,13 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": 
"sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -620,9 +693,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.3.tgz", - "integrity": "sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.3.tgz", + "integrity": "sha512-uVsWNvlVsIninV2prNz/3lHCb+5CJ+e+IUBfbjToAHODtfGYLfCFuY4AU7TskI+dAKk+njsPiBjq1gKTvZOBaw==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -2025,33 +2098,33 @@ } }, "node_modules/@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.3.tgz", - "integrity": "sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.3.tgz", + "integrity": "sha512-+K0yF1/9yR0oHdE0StHuEj3uTPzwwbrLGfNOndVJVV2TqA5+j3oljJUb4nmB954FLGjNem976+B+eDuLIjesiQ==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.3", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.3", - "@babel/types": "^7.19.3", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.3", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.3", + "@babel/types": "^7.23.3", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -2069,13 +2142,13 @@ } }, "node_modules/@babel/types": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.3.tgz", - "integrity": "sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.3.tgz", + "integrity": "sha512-OZnvoH2l8PK5eUvEcUyCt/sXgr/h+UWpVuBbOljwcrAgUl6lpchoQ++PHGyQy1AtYnVA6CEq3y5xeEI10brpXw==", "dev": true, "dependencies": { - 
"@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": { @@ -2089,13 +2162,13 @@ "dev": true }, "node_modules/@codemirror/autocomplete": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", - "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", + "version": "6.11.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.11.1.tgz", + "integrity": "sha512-L5UInv8Ffd6BPw0P3EF7JLYAMeEbclY7+6Q11REt8vhih8RuLreKtPy/xk8wPxs4EQgYqzI7cdgpiYwWlbS/ow==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.6.0", + "@codemirror/view": "^6.17.0", "@lezer/common": "^1.0.0" }, "peerDependencies": { @@ -2106,33 +2179,33 @@ } }, "node_modules/@codemirror/commands": { - "version": "6.2.4", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", - "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", + "version": "6.3.2", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.2.tgz", + "integrity": "sha512-tjoi4MCWDNxgIpoLZ7+tezdS9OEB6pkiDKhfKx9ReJ/XBcs2G2RXIu+/FxXBlWsPTsz6C9q/r4gjzrsxpcnqCQ==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0" + "@lezer/common": "^1.1.0" } }, "node_modules/@codemirror/language": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", - "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", + "version": "6.9.3", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.9.3.tgz", + "integrity": "sha512-qq48pYzoi6ldYWV/52+Z9Ou6QouVI+8YwvxFbUypI33NbjG2UeRHKENRyhwljTTiOqjQ33FjyZj6EREQ9apAOQ==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0", + "@lezer/common": "^1.1.0", "@lezer/highlight": "^1.0.0", "@lezer/lr": "^1.0.0", "style-mod": "^4.0.0" } }, "node_modules/@codemirror/lint": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", - "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.4.2.tgz", + "integrity": "sha512-wzRkluWb1ptPKdzlsrbwwjYCPLgzU6N88YBAmlZi8WFyuiEduSd05MnJYNogzyc8rPK7pj6m95ptUApc8sHKVA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2140,9 +2213,9 @@ } }, "node_modules/@codemirror/search": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", - "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.5.tgz", + "integrity": "sha512-PIEN3Ke1buPod2EHbJsoQwlbpkz30qGZKcnmH1eihq9+bPQx8gelauUwLYaY4vBOuBAuEhmpDLii4rj/uO0yMA==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -2150,17 +2223,17 @@ } }, "node_modules/@codemirror/state": { - 
"version": "6.2.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", - "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" + "version": "6.3.3", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.3.3.tgz", + "integrity": "sha512-0wufKcTw2dEwEaADajjHf6hBy1sh3M6V0e+q4JKIhLuiMSe5td5HOWpUdvKth1fT1M9VYOboajoBHpkCd7PG7A==" }, "node_modules/@codemirror/view": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", - "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.22.1.tgz", + "integrity": "sha512-38BRn1nPqZqiHbmWfI8zri23IbRVbmSpSmh1E/Ysvc+lIGGdBC17K8zlK7ZU6fhfy9x4De9Zyj5JQqScPq5DkA==", "dependencies": { "@codemirror/state": "^6.1.4", - "style-mod": "^4.0.0", + "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, @@ -2485,33 +2558,33 @@ } }, "node_modules/@fortawesome/fontawesome-common-types": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", - "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.5.1.tgz", + "integrity": "sha512-GkWzv+L6d2bI5f/Vk6ikJ9xtl7dfXtoRu3YGE6nq0p/FFqA1ebMOAWg3XgRyb0I6LYyYkiAo+3/KrwuBp8xG7A==", "hasInstallScript": true, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/fontawesome-svg-core": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", - "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.5.1.tgz", + "integrity": "sha512-MfRCYlQPXoLlpem+egxjfkEuP9UQswTrlCOsknus/NcMoblTH2g0jPrapbcIb04KGA7E2GZxbAccGZfWoYgsrQ==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" }, "engines": { "node": ">=6" } }, "node_modules/@fortawesome/free-solid-svg-icons": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", - "integrity": "sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.5.1.tgz", + "integrity": "sha512-S1PPfU3mIJa59biTtXJz1oI0+KAXW6bkAb31XKhxdxtuXDiUIFsih4JR1v5BbxY7hVHsD1RKq+jRkVRaf773NQ==", "hasInstallScript": true, "dependencies": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" }, "engines": { "node": ">=6" @@ -3510,35 +3583,35 @@ "dev": true }, "node_modules/@lezer/common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", - "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.1.1.tgz", + "integrity": 
"sha512-aAPB9YbvZHqAW+bIwiuuTDGB4DG0sYNRObGLxud8cW7osw1ZQxfDuTZ8KQiqfZ0QJGcR34CvpTMDXEyo/+Htgg==" }, "node_modules/@lezer/generator": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", - "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.5.1.tgz", + "integrity": "sha512-vodJv2JPwsFsiBBHE463yBhvUI9TmhIu5duF/8MH304xNS6FyWH/vTyG61pjhERm5f+VBP94co0eiN+afWcvXw==", "dev": true, "dependencies": { "@lezer/common": "^1.0.2", "@lezer/lr": "^1.3.0" }, "bin": { - "lezer-generator": "dist/lezer-generator.cjs" + "lezer-generator": "src/lezer-generator.cjs" } }, "node_modules/@lezer/highlight": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", - "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@lezer/lr": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", - "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.14.tgz", + "integrity": "sha512-z5mY4LStlA3yL7aHT/rqgG614cfcvklS+8oFRFBYrs4YaWLJyKKM4+nN6KopToX0o9Hj6zmH6M5kinOYuy06ug==", "dependencies": { "@lezer/common": "^1.0.0" } @@ -4209,9 +4282,9 @@ } }, "node_modules/@types/enzyme": { - "version": "3.10.13", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", - "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", + "version": "3.10.18", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.18.tgz", + "integrity": "sha512-RaO/TyyHZvXkpzinbMTZmd/S5biU4zxkvDsn22ujC29t9FMSzq8tnn8f2MxQ2P8GVhFRG5jTAL05DXKyTtpEQQ==", "dev": true, "dependencies": { "@types/cheerio": "*", @@ -4279,9 +4352,9 @@ } }, "node_modules/@types/flot": { - "version": "0.0.32", - "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.32.tgz", - "integrity": "sha512-aturel4TWMY86N4Pkpc9pSoUd/p8c3BjGj4fTDkaZIpkRPzLH1VXZCAKGUywcFkTqgZMhPJFPWxd4pl87y8h/w==", + "version": "0.0.36", + "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.36.tgz", + "integrity": "sha512-xRo4MUIMnRPGXJCuQXAWvo+uKRmziRGHAy9LQHsLgbKanknpe5z3EThqVuYkVCC6ZWPZ/8pllBXnzQmGzFkJ/Q==", "dev": true, "dependencies": { "@types/jquery": "*" @@ -4341,9 +4414,9 @@ } }, "node_modules/@types/jest": { - "version": "29.5.2", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", - "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", + "version": "29.5.11", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.11.tgz", + "integrity": "sha512-S2mHmYIVe13vrm6q4kN6fLYYAka15ALQki/vgDC3mIukEOx8WJlv0kQPM+d4w8Gp6u0uSdKND04IlTXBv0rwnQ==", "dev": true, "dependencies": { "expect": "^29.0.0", @@ -4383,9 +4456,9 @@ "dev": true }, "node_modules/@types/jquery": { - "version": "3.5.16", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.16.tgz", 
- "integrity": "sha512-bsI7y4ZgeMkmpG9OM710RRzDFp+w4P1RGiIt30C1mSBT+ExCleeh4HObwgArnDFELmRrOpXgSYN9VF1hj+f1lw==", + "version": "3.5.29", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.29.tgz", + "integrity": "sha512-oXQQC9X9MOPRrMhPHHOsXqeQDnWeCDT3PelUIg/Oy8FAbzSZtFHRjc7IpbfFVmpLtJ+UOoywpRsuO5Jxjybyeg==", "dev": true, "dependencies": { "@types/sizzle": "*" @@ -4410,9 +4483,12 @@ "devOptional": true }, "node_modules/@types/node": { - "version": "17.0.45", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", - "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + "version": "20.10.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.4.tgz", + "integrity": "sha512-D08YG6rr8X90YB56tSIuBaddy/UXAA9RKJoFvrsnogAum/0pmjkgi4+2nx96A330FmioegBWmEYQ+syqCFaveg==", + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/parse-json": { "version": "4.0.0", @@ -4451,9 +4527,9 @@ "devOptional": true }, "node_modules/@types/react": { - "version": "17.0.53", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.53.tgz", - "integrity": "sha512-1yIpQR2zdYu1Z/dc1OxC+MA6GR240u3gcnP4l6mvj/PJiVaqHsQPmWttsvHsfnhfPbU2FuGmo0wSITPygjBmsw==", + "version": "17.0.71", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.71.tgz", + "integrity": "sha512-lfqOu9mp16nmaGRrS8deS2Taqhd5Ih0o92Te5Ws6I1py4ytHBcXLqh0YIqVsViqwVI5f+haiFM6hju814BzcmA==", "dev": true, "dependencies": { "@types/prop-types": "*", @@ -4462,18 +4538,18 @@ } }, "node_modules/@types/react-copy-to-clipboard": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.4.tgz", - "integrity": "sha512-otTJsJpofYAeaIeOwV5xBUGpo6exXG2HX7X4nseToCB2VgPEBxGBHCm/FecZ676doNR7HCSTVtmohxfG2b3/yQ==", + "version": "5.0.7", + "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.7.tgz", + "integrity": "sha512-Gft19D+as4M+9Whq1oglhmK49vqPhcLzk8WfvfLvaYMIPYanyfLy0+CwFucMJfdKoSFyySPmkkWn8/E6voQXjQ==", "dev": true, "dependencies": { "@types/react": "*" } }, "node_modules/@types/react-dom": { - "version": "17.0.20", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", - "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", + "version": "17.0.25", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.25.tgz", + "integrity": "sha512-urx7A7UxkZQmThYA4So0NelOVjx3V4rNFVJwp0WZlbIK5eM4rNJDiN3R/E9ix0MBh6kAEojk/9YL+Te6D9zHNA==", "dev": true, "dependencies": { "@types/react": "^17" @@ -4525,9 +4601,9 @@ "dev": true }, "node_modules/@types/sanitize-html": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", - "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", + "version": "2.9.5", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.5.tgz", + "integrity": "sha512-2Sr1vd8Dw+ypsg/oDDfZ57OMSG2Befs+l2CMyCC5bVSK3CpE7lTB2aNlbbWzazgVA+Qqfuholwom6x/mWd1qmw==", "dev": true, "dependencies": { "htmlparser2": "^8.0.0" @@ -4578,9 +4654,9 @@ } }, "node_modules/@types/sinon": { - "version": "10.0.15", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", - "integrity": 
"sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", + "version": "10.0.20", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.20.tgz", + "integrity": "sha512-2APKKruFNCAZgx3daAyACGzWuJ028VVCUDk6o2rw/Z4PXT0ogwdV4KUegW0MwVs0Zu59auPXbbuBJHF12Sx1Eg==", "dev": true, "dependencies": { "@types/sinonjs__fake-timers": "*" @@ -7562,9 +7638,9 @@ "dev": true }, "node_modules/downshift": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", - "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.2.tgz", + "integrity": "sha512-iOv+E1Hyt3JDdL9yYcOgW7nZ7GQ2Uz6YbggwXvKUSleetYhU2nXD482Rz6CzvM4lvI1At34BYruKAL4swRGxaA==", "dependencies": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -7989,9 +8065,9 @@ } }, "node_modules/eslint-config-prettier": { - "version": "8.8.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", - "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz", + "integrity": "sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==", "dev": true, "bin": { "eslint-config-prettier": "bin/cli.js" @@ -9244,9 +9320,9 @@ "dev": true }, "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "hasInstallScript": true, "optional": true, "os": [ @@ -10555,9 +10631,9 @@ } }, "node_modules/jest-canvas-mock": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", - "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.2.tgz", + "integrity": "sha512-vgnpPupjOL6+L5oJXzxTxFrlGEIbHdZqFU+LFNdtLxZ3lRDCl17FlTMM7IatoRQkrcyOTMlDinjUguqmQ6bR2A==", "dev": true, "dependencies": { "cssfontparser": "^1.2.1", @@ -12841,9 +12917,9 @@ } }, "node_modules/jquery": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", - "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz", + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==" }, "node_modules/jquery.flot.tooltip": { "version": "0.9.0", @@ -13646,14 +13722,13 @@ } }, "node_modules/nock": { - "version": "13.3.1", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", - "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", + "version": "13.4.0", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.4.0.tgz", + 
"integrity": "sha512-W8NVHjO/LCTNA64yxAPHV/K47LpGYcVzgKd3Q0n6owhwvD0Dgoterc25R4rnZbckJEb6Loxz1f5QMuJpJnbSyQ==", "dev": true, "dependencies": { "debug": "^4.1.0", "json-stringify-safe": "^5.0.1", - "lodash": "^4.17.21", "propagate": "^2.0.0" }, "engines": { @@ -18065,9 +18140,9 @@ "dev": true }, "node_modules/sass": { - "version": "1.62.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", - "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", + "version": "1.69.5", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.69.5.tgz", + "integrity": "sha512-qg2+UCJibLr2LCVOt3OlPhr/dqVHWOa9XtZf2OjbLs/T4VPSJ00udtgJxH3neXZm+QqX8B+3cU7RaLqp1iVfcQ==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -18796,9 +18871,9 @@ } }, "node_modules/style-mod": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.0.tgz", - "integrity": "sha512-OPhtyEjyyN9x3nhPsu76f52yUGXiZcgvsrFVtvTkyGRQJ0XK+GPc6ov1z+lRpbeabka+MYEQxOYRnt5nF30aMw==" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==" }, "node_modules/stylehacks": { "version": "5.1.0", @@ -19384,9 +19459,9 @@ "dev": true }, "node_modules/ts-jest": { - "version": "29.1.0", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", - "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", + "version": "29.1.1", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", + "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -19395,7 +19470,7 @@ "json5": "^2.2.3", "lodash.memoize": "4.x", "make-error": "1.x", - "semver": "7.x", + "semver": "^7.5.3", "yargs-parser": "^21.0.1" }, "bin": { @@ -19605,6 +19680,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "node_modules/unicode-canonical-property-names-ecmascript": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", @@ -20766,28 +20846,28 @@ "name": "@prometheus-io/app", "version": "0.48.0", "dependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/commands": "^6.2.4", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/search": "^6.5.0", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.4.0", - "@fortawesome/free-solid-svg-icons": "6.4.0", + "@fortawesome/fontawesome-svg-core": "6.5.1", + "@fortawesome/free-solid-svg-icons": "6.5.1", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@lezer/common": 
"^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.48.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.6.0", + "downshift": "^7.6.2", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.0", + "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -20801,21 +20881,21 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.11.0", - "sass": "1.62.1", + "sass": "1.69.5", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.13", - "@types/flot": "0.0.32", - "@types/jquery": "^3.5.16", - "@types/react": "^17.0.60", - "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.20", + "@types/enzyme": "^3.10.18", + "@types/flot": "0.0.36", + "@types/jquery": "^3.5.29", + "@types/react": "^17.0.71", + "@types/react-copy-to-clipboard": "^5.0.7", + "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.0", - "@types/sinon": "^10.0.15", + "@types/sanitize-html": "^2.9.5", + "@types/sinon": "^10.0.20", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -20823,18 +20903,7 @@ "sinon": "^14.0.2" }, "optionalDependencies": { - "fsevents": "^2.3.2" - } - }, - "react-app/node_modules/@types/react": { - "version": "17.0.60", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", - "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", - "dev": true, - "dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" + "fsevents": "^2.3.3" } } }, @@ -20850,12 +20919,71 @@ } }, "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "requires": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } } }, "@babel/compat-data": { @@ -20907,13 +21035,14 @@ } }, "@babel/generator": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz", - "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.3.tgz", + "integrity": "sha512-keeZWAV4LU3tW0qRi19HRpabC/ilM0HRBBzf9/k8FFiG4KVpiv0FIy4hHfLfFQZNhziCTPTmd59zoyv6DNISzg==", "dev": true, "requires": { - "@babel/types": "^7.19.3", + "@babel/types": "^7.23.3", "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "dependencies": { @@ -21001,9 +21130,9 @@ } }, "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true }, "@babel/helper-explode-assignable-expression": { @@ -21016,22 +21145,22 @@ } }, "@babel/helper-function-name": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz", - "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "requires": { - "@babel/template": "^7.18.10", - "@babel/types": "^7.19.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" } }, "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": 
"sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-member-expression-to-functions": { @@ -21127,24 +21256,24 @@ } }, "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-string-parser": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz", - "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "dev": true }, "@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "dev": true }, "@babel/helper-validator-option": { @@ -21177,13 +21306,13 @@ } }, "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dev": true, "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "dependencies": { @@ -21246,9 +21375,9 @@ } }, "@babel/parser": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.3.tgz", - "integrity": "sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.3.tgz", + "integrity": "sha512-uVsWNvlVsIninV2prNz/3lHCb+5CJ+e+IUBfbjToAHODtfGYLfCFuY4AU7TskI+dAKk+njsPiBjq1gKTvZOBaw==", "dev": true }, "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { @@ -22182,30 +22311,30 @@ } }, "@babel/template": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", - "integrity": 
"sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.18.10", - "@babel/types": "^7.18.10" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" } }, "@babel/traverse": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.3.tgz", - "integrity": "sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.3.tgz", + "integrity": "sha512-+K0yF1/9yR0oHdE0StHuEj3uTPzwwbrLGfNOndVJVV2TqA5+j3oljJUb4nmB954FLGjNem976+B+eDuLIjesiQ==", "dev": true, "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/generator": "^7.19.3", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.19.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.19.3", - "@babel/types": "^7.19.3", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.3", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.3", + "@babel/types": "^7.23.3", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -22219,13 +22348,13 @@ } }, "@babel/types": { - "version": "7.19.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.3.tgz", - "integrity": "sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==", + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.3.tgz", + "integrity": "sha512-OZnvoH2l8PK5eUvEcUyCt/sXgr/h+UWpVuBbOljwcrAgUl6lpchoQ++PHGyQy1AtYnVA6CEq3y5xeEI10brpXw==", "dev": true, "requires": { - "@babel/helper-string-parser": "^7.18.10", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" } }, @@ -22236,44 +22365,44 @@ "dev": true }, "@codemirror/autocomplete": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.7.1.tgz", - "integrity": "sha512-hSxf9S0uB+GV+gBsjY1FZNo53e1FFdzPceRfCfD1gWOnV6o21GfB5J5Wg9G/4h76XZMPrF0A6OCK/Rz5+V1egg==", + "version": "6.11.1", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.11.1.tgz", + "integrity": "sha512-L5UInv8Ffd6BPw0P3EF7JLYAMeEbclY7+6Q11REt8vhih8RuLreKtPy/xk8wPxs4EQgYqzI7cdgpiYwWlbS/ow==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", - "@codemirror/view": "^6.6.0", + "@codemirror/view": "^6.17.0", "@lezer/common": "^1.0.0" } }, "@codemirror/commands": { - "version": "6.2.4", - "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.2.4.tgz", - "integrity": "sha512-42lmDqVH0ttfilLShReLXsDfASKLXzfyC36bzwcqzox9PlHulMcsUOfHXNo2X2aFMVNUoQ7j+d4q5bnfseYoOA==", + "version": "6.3.2", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.3.2.tgz", + "integrity": 
"sha512-tjoi4MCWDNxgIpoLZ7+tezdS9OEB6pkiDKhfKx9ReJ/XBcs2G2RXIu+/FxXBlWsPTsz6C9q/r4gjzrsxpcnqCQ==", "requires": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.2.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0" + "@lezer/common": "^1.1.0" } }, "@codemirror/language": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.7.0.tgz", - "integrity": "sha512-4SMwe6Fwn57klCUsVN0y4/h/iWT+XIXFEmop2lIHHuWO0ubjCrF3suqSZLyOQlznxkNnNbOOfKe5HQbQGCAmTg==", + "version": "6.9.3", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.9.3.tgz", + "integrity": "sha512-qq48pYzoi6ldYWV/52+Z9Ou6QouVI+8YwvxFbUypI33NbjG2UeRHKENRyhwljTTiOqjQ33FjyZj6EREQ9apAOQ==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", - "@lezer/common": "^1.0.0", + "@lezer/common": "^1.1.0", "@lezer/highlight": "^1.0.0", "@lezer/lr": "^1.0.0", "style-mod": "^4.0.0" } }, "@codemirror/lint": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.2.2.tgz", - "integrity": "sha512-kHGuynBHjqinp1Bx25D2hgH8a6Fh1m9rSmZFzBVTqPIXDIcZ6j3VI67DY8USGYpGrjrJys9R52eLxtfERGNozg==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.4.2.tgz", + "integrity": "sha512-wzRkluWb1ptPKdzlsrbwwjYCPLgzU6N88YBAmlZi8WFyuiEduSd05MnJYNogzyc8rPK7pj6m95ptUApc8sHKVA==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22281,9 +22410,9 @@ } }, "@codemirror/search": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.0.tgz", - "integrity": "sha512-64/M40YeJPToKvGO6p3fijo2vwUEj4nACEAXElCaYQ50HrXSvRaK+NHEhSh73WFBGdvIdhrV+lL9PdJy2RfCYA==", + "version": "6.5.5", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.5.tgz", + "integrity": "sha512-PIEN3Ke1buPod2EHbJsoQwlbpkz30qGZKcnmH1eihq9+bPQx8gelauUwLYaY4vBOuBAuEhmpDLii4rj/uO0yMA==", "requires": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -22291,17 +22420,17 @@ } }, "@codemirror/state": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", - "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==" + "version": "6.3.3", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.3.3.tgz", + "integrity": "sha512-0wufKcTw2dEwEaADajjHf6hBy1sh3M6V0e+q4JKIhLuiMSe5td5HOWpUdvKth1fT1M9VYOboajoBHpkCd7PG7A==" }, "@codemirror/view": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.13.0.tgz", - "integrity": "sha512-oXTfJzHJ5Tl7f6T8ZO0HKf981zubxgKohjddLobbntbNZHlOZGMRL+pPZGtclDWFaFJWtGBYRGyNdjQ6Xsx5yA==", + "version": "6.22.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.22.1.tgz", + "integrity": "sha512-38BRn1nPqZqiHbmWfI8zri23IbRVbmSpSmh1E/Ysvc+lIGGdBC17K8zlK7ZU6fhfy9x4De9Zyj5JQqScPq5DkA==", "requires": { "@codemirror/state": "^6.1.4", - "style-mod": "^4.0.0", + "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, @@ -22475,24 +22604,24 @@ } }, "@fortawesome/fontawesome-common-types": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz", - "integrity": "sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==" + "version": "6.5.1", + "resolved": 
"https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.5.1.tgz", + "integrity": "sha512-GkWzv+L6d2bI5f/Vk6ikJ9xtl7dfXtoRu3YGE6nq0p/FFqA1ebMOAWg3XgRyb0I6LYyYkiAo+3/KrwuBp8xG7A==" }, "@fortawesome/fontawesome-svg-core": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz", - "integrity": "sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.5.1.tgz", + "integrity": "sha512-MfRCYlQPXoLlpem+egxjfkEuP9UQswTrlCOsknus/NcMoblTH2g0jPrapbcIb04KGA7E2GZxbAccGZfWoYgsrQ==", "requires": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" } }, "@fortawesome/free-solid-svg-icons": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz", - "integrity": "sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.5.1.tgz", + "integrity": "sha512-S1PPfU3mIJa59biTtXJz1oI0+KAXW6bkAb31XKhxdxtuXDiUIFsih4JR1v5BbxY7hVHsD1RKq+jRkVRaf773NQ==", "requires": { - "@fortawesome/fontawesome-common-types": "6.4.0" + "@fortawesome/fontawesome-common-types": "6.5.1" } }, "@fortawesome/react-fontawesome": { @@ -23282,14 +23411,14 @@ "dev": true }, "@lezer/common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", - "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.1.1.tgz", + "integrity": "sha512-aAPB9YbvZHqAW+bIwiuuTDGB4DG0sYNRObGLxud8cW7osw1ZQxfDuTZ8KQiqfZ0QJGcR34CvpTMDXEyo/+Htgg==" }, "@lezer/generator": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.2.3.tgz", - "integrity": "sha512-xRmNryYbJpWs7novjWtQLCGHOj71B4X1QHQ4SgJqwm11tl6COEVAGhuFTXKX16JMJUhumdXaX8We6hEMd4clDg==", + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.5.1.tgz", + "integrity": "sha512-vodJv2JPwsFsiBBHE463yBhvUI9TmhIu5duF/8MH304xNS6FyWH/vTyG61pjhERm5f+VBP94co0eiN+afWcvXw==", "dev": true, "requires": { "@lezer/common": "^1.0.2", @@ -23297,17 +23426,17 @@ } }, "@lezer/highlight": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", - "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + "integrity": "sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", "requires": { "@lezer/common": "^1.0.0" } }, "@lezer/lr": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.6.tgz", - "integrity": "sha512-IDhcWjfxwWACnatUi0GzWBCbochfqxo3LZZlS27LbJh8RVYYXXyR5Ck9659IhkWkhSW/kZlaaiJpUO+YZTUK+Q==", + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.14.tgz", + "integrity": "sha512-z5mY4LStlA3yL7aHT/rqgG614cfcvklS+8oFRFBYrs4YaWLJyKKM4+nN6KopToX0o9Hj6zmH6M5kinOYuy06ug==", "requires": { 
"@lezer/common": "^1.0.0" } @@ -23406,42 +23535,42 @@ "@prometheus-io/app": { "version": "file:react-app", "requires": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/commands": "^6.2.4", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/search": "^6.5.0", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.4.0", - "@fortawesome/free-solid-svg-icons": "6.4.0", + "@fortawesome/fontawesome-svg-core": "6.5.1", + "@fortawesome/free-solid-svg-icons": "6.5.1", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.48.0", "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.13", - "@types/flot": "0.0.32", - "@types/jquery": "^3.5.16", - "@types/react": "^17.0.60", - "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.20", + "@types/enzyme": "^3.10.18", + "@types/flot": "0.0.36", + "@types/jquery": "^3.5.29", + "@types/react": "^17.0.71", + "@types/react-copy-to-clipboard": "^5.0.7", + "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.0", - "@types/sinon": "^10.0.15", + "@types/sanitize-html": "^2.9.5", + "@types/sinon": "^10.0.20", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.6.0", + "downshift": "^7.6.2", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", - "fsevents": "^2.3.2", + "fsevents": "^2.3.3", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.0", + "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -23456,40 +23585,27 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.11.0", - "sass": "1.62.1", + "sass": "1.69.5", "sinon": "^14.0.2", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" - }, - "dependencies": { - "@types/react": { - "version": "17.0.60", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.60.tgz", - "integrity": "sha512-pCH7bqWIfzHs3D+PDs3O/COCQJka+Kcw3RnO9rFA2zalqoXg7cNjJDh6mZ7oRtY1wmY4LVwDdAbA1F7Z8tv3BQ==", - "dev": true, - "requires": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - } } }, "@prometheus-io/codemirror-promql": { "version": "file:module/codemirror-promql", "requires": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "@prometheus-io/lezer-promql": "0.48.0", "isomorphic-fetch": "^3.0.0", "lru-cache": "^7.18.3", - "nock": "^13.3.1" + "nock": "^13.4.0" 
}, "dependencies": { "lru-cache": { @@ -23502,9 +23618,9 @@ "@prometheus-io/lezer-promql": { "version": "file:module/lezer-promql", "requires": { - "@lezer/generator": "^1.2.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6" + "@lezer/generator": "^1.5.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14" } }, "@rollup/plugin-babel": { @@ -23875,9 +23991,9 @@ } }, "@types/enzyme": { - "version": "3.10.13", - "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.13.tgz", - "integrity": "sha512-FCtoUhmFsud0Yx9fmZk179GkdZ4U9B0GFte64/Md+W/agx0L5SxsIIbhLBOxIb9y2UfBA4WQnaG1Od/UsUQs9Q==", + "version": "3.10.18", + "resolved": "https://registry.npmjs.org/@types/enzyme/-/enzyme-3.10.18.tgz", + "integrity": "sha512-RaO/TyyHZvXkpzinbMTZmd/S5biU4zxkvDsn22ujC29t9FMSzq8tnn8f2MxQ2P8GVhFRG5jTAL05DXKyTtpEQQ==", "dev": true, "requires": { "@types/cheerio": "*", @@ -23947,9 +24063,9 @@ } }, "@types/flot": { - "version": "0.0.32", - "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.32.tgz", - "integrity": "sha512-aturel4TWMY86N4Pkpc9pSoUd/p8c3BjGj4fTDkaZIpkRPzLH1VXZCAKGUywcFkTqgZMhPJFPWxd4pl87y8h/w==", + "version": "0.0.36", + "resolved": "https://registry.npmjs.org/@types/flot/-/flot-0.0.36.tgz", + "integrity": "sha512-xRo4MUIMnRPGXJCuQXAWvo+uKRmziRGHAy9LQHsLgbKanknpe5z3EThqVuYkVCC6ZWPZ/8pllBXnzQmGzFkJ/Q==", "dev": true, "requires": { "@types/jquery": "*" @@ -24009,9 +24125,9 @@ } }, "@types/jest": { - "version": "29.5.2", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.2.tgz", - "integrity": "sha512-mSoZVJF5YzGVCk+FsDxzDuH7s+SCkzrgKZzf0Z0T2WudhBUPoF6ktoTPC4R0ZoCPCV5xUvuU6ias5NvxcBcMMg==", + "version": "29.5.11", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.11.tgz", + "integrity": "sha512-S2mHmYIVe13vrm6q4kN6fLYYAka15ALQki/vgDC3mIukEOx8WJlv0kQPM+d4w8Gp6u0uSdKND04IlTXBv0rwnQ==", "dev": true, "requires": { "expect": "^29.0.0", @@ -24044,9 +24160,9 @@ } }, "@types/jquery": { - "version": "3.5.16", - "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.16.tgz", - "integrity": "sha512-bsI7y4ZgeMkmpG9OM710RRzDFp+w4P1RGiIt30C1mSBT+ExCleeh4HObwgArnDFELmRrOpXgSYN9VF1hj+f1lw==", + "version": "3.5.29", + "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.29.tgz", + "integrity": "sha512-oXQQC9X9MOPRrMhPHHOsXqeQDnWeCDT3PelUIg/Oy8FAbzSZtFHRjc7IpbfFVmpLtJ+UOoywpRsuO5Jxjybyeg==", "dev": true, "requires": { "@types/sizzle": "*" @@ -24071,9 +24187,12 @@ "devOptional": true }, "@types/node": { - "version": "17.0.45", - "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", - "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + "version": "20.10.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.4.tgz", + "integrity": "sha512-D08YG6rr8X90YB56tSIuBaddy/UXAA9RKJoFvrsnogAum/0pmjkgi4+2nx96A330FmioegBWmEYQ+syqCFaveg==", + "requires": { + "undici-types": "~5.26.4" + } }, "@types/parse-json": { "version": "4.0.0", @@ -24112,9 +24231,9 @@ "devOptional": true }, "@types/react": { - "version": "17.0.53", - "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.53.tgz", - "integrity": "sha512-1yIpQR2zdYu1Z/dc1OxC+MA6GR240u3gcnP4l6mvj/PJiVaqHsQPmWttsvHsfnhfPbU2FuGmo0wSITPygjBmsw==", + "version": "17.0.71", + "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.71.tgz", + "integrity": "sha512-lfqOu9mp16nmaGRrS8deS2Taqhd5Ih0o92Te5Ws6I1py4ytHBcXLqh0YIqVsViqwVI5f+haiFM6hju814BzcmA==", "dev": true, "requires": { 
"@types/prop-types": "*", @@ -24123,18 +24242,18 @@ } }, "@types/react-copy-to-clipboard": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.4.tgz", - "integrity": "sha512-otTJsJpofYAeaIeOwV5xBUGpo6exXG2HX7X4nseToCB2VgPEBxGBHCm/FecZ676doNR7HCSTVtmohxfG2b3/yQ==", + "version": "5.0.7", + "resolved": "https://registry.npmjs.org/@types/react-copy-to-clipboard/-/react-copy-to-clipboard-5.0.7.tgz", + "integrity": "sha512-Gft19D+as4M+9Whq1oglhmK49vqPhcLzk8WfvfLvaYMIPYanyfLy0+CwFucMJfdKoSFyySPmkkWn8/E6voQXjQ==", "dev": true, "requires": { "@types/react": "*" } }, "@types/react-dom": { - "version": "17.0.20", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.20.tgz", - "integrity": "sha512-4pzIjSxDueZZ90F52mU3aPoogkHIoSIDG+oQ+wQK7Cy2B9S+MvOqY0uEA/qawKz381qrEDkvpwyt8Bm31I8sbA==", + "version": "17.0.25", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.25.tgz", + "integrity": "sha512-urx7A7UxkZQmThYA4So0NelOVjx3V4rNFVJwp0WZlbIK5eM4rNJDiN3R/E9ix0MBh6kAEojk/9YL+Te6D9zHNA==", "dev": true, "requires": { "@types/react": "^17" @@ -24186,9 +24305,9 @@ "dev": true }, "@types/sanitize-html": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.0.tgz", - "integrity": "sha512-4fP/kEcKNj2u39IzrxWYuf/FnCCwwQCpif6wwY6ROUS1EPRIfWJjGkY3HIowY1EX/VbX5e86yq8AAE7UPMgATg==", + "version": "2.9.5", + "resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.9.5.tgz", + "integrity": "sha512-2Sr1vd8Dw+ypsg/oDDfZ57OMSG2Befs+l2CMyCC5bVSK3CpE7lTB2aNlbbWzazgVA+Qqfuholwom6x/mWd1qmw==", "dev": true, "requires": { "htmlparser2": "^8.0.0" @@ -24234,9 +24353,9 @@ } }, "@types/sinon": { - "version": "10.0.15", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.15.tgz", - "integrity": "sha512-3lrFNQG0Kr2LDzvjyjB6AMJk4ge+8iYhQfdnSwIwlG88FUOV43kPcQqDZkDa/h3WSZy6i8Fr0BSjfQtB1B3xuQ==", + "version": "10.0.20", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.20.tgz", + "integrity": "sha512-2APKKruFNCAZgx3daAyACGzWuJ028VVCUDk6o2rw/Z4PXT0ogwdV4KUegW0MwVs0Zu59auPXbbuBJHF12Sx1Eg==", "dev": true, "requires": { "@types/sinonjs__fake-timers": "*" @@ -26472,9 +26591,9 @@ "dev": true }, "downshift": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", - "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.2.tgz", + "integrity": "sha512-iOv+E1Hyt3JDdL9yYcOgW7nZ7GQ2Uz6YbggwXvKUSleetYhU2nXD482Rz6CzvM4lvI1At34BYruKAL4swRGxaA==", "requires": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -26810,9 +26929,9 @@ } }, "eslint-config-prettier": { - "version": "8.8.0", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz", - "integrity": "sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz", + "integrity": "sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==", "dev": true, "requires": {} }, @@ -27735,9 +27854,9 @@ "dev": true }, "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": 
"sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "optional": true }, "function-bind": { @@ -28681,9 +28800,9 @@ } }, "jest-canvas-mock": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.1.tgz", - "integrity": "sha512-IVnRiz+v4EYn3ydM/pBo8GW/J+nU/Hg5gHBQQOUQhdRyNfvHnabB8ReqARLO0p+kvQghqr4V0tA92CF3JcUSRg==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jest-canvas-mock/-/jest-canvas-mock-2.5.2.tgz", + "integrity": "sha512-vgnpPupjOL6+L5oJXzxTxFrlGEIbHdZqFU+LFNdtLxZ3lRDCl17FlTMM7IatoRQkrcyOTMlDinjUguqmQ6bR2A==", "dev": true, "requires": { "cssfontparser": "^1.2.1", @@ -30529,9 +30648,9 @@ } }, "jquery": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", - "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==" + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz", + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==" }, "jquery.flot.tooltip": { "version": "0.9.0", @@ -31178,14 +31297,13 @@ } }, "nock": { - "version": "13.3.1", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.3.1.tgz", - "integrity": "sha512-vHnopocZuI93p2ccivFyGuUfzjq2fxNyNurp7816mlT5V5HF4SzXu8lvLrVzBbNqzs+ODooZ6OksuSUNM7Njkw==", + "version": "13.4.0", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.4.0.tgz", + "integrity": "sha512-W8NVHjO/LCTNA64yxAPHV/K47LpGYcVzgKd3Q0n6owhwvD0Dgoterc25R4rnZbckJEb6Loxz1f5QMuJpJnbSyQ==", "dev": true, "requires": { "debug": "^4.1.0", "json-stringify-safe": "^5.0.1", - "lodash": "^4.17.21", "propagate": "^2.0.0" } }, @@ -34329,9 +34447,9 @@ "dev": true }, "sass": { - "version": "1.62.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.62.1.tgz", - "integrity": "sha512-NHpxIzN29MXvWiuswfc1W3I0N8SXBd8UR26WntmDlRYf0bSADnwnOjsyMZ3lMezSlArD33Vs3YFhp7dWvL770A==", + "version": "1.69.5", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.69.5.tgz", + "integrity": "sha512-qg2+UCJibLr2LCVOt3OlPhr/dqVHWOa9XtZf2OjbLs/T4VPSJ00udtgJxH3neXZm+QqX8B+3cU7RaLqp1iVfcQ==", "requires": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -34896,9 +35014,9 @@ "requires": {} }, "style-mod": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.0.tgz", - "integrity": "sha512-OPhtyEjyyN9x3nhPsu76f52yUGXiZcgvsrFVtvTkyGRQJ0XK+GPc6ov1z+lRpbeabka+MYEQxOYRnt5nF30aMw==" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==" }, "stylehacks": { "version": "5.1.0", @@ -35344,9 +35462,9 @@ "dev": true }, "ts-jest": { - "version": "29.1.0", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", - "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", + "version": "29.1.1", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", + "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", "dev": true, "requires": { "bs-logger": "0.x", @@ 
-35355,7 +35473,7 @@ "json5": "^2.2.3", "lodash.memoize": "4.x", "make-error": "1.x", - "semver": "7.x", + "semver": "^7.5.3", "yargs-parser": "^21.0.1" }, "dependencies": { @@ -35498,6 +35616,11 @@ "which-boxed-primitive": "^1.0.2" } }, + "undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "unicode-canonical-property-names-ecmascript": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", diff --git a/web/ui/package.json b/web/ui/package.json index acdef8236..7212ccacb 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -16,16 +16,16 @@ "npm": ">=7.0.0" }, "devDependencies": { - "@types/jest": "^29.5.2", - "@types/node": "^17.0.45", - "eslint-config-prettier": "^8.8.0", + "@types/jest": "^29.5.11", + "@types/node": "^20.10.4", + "eslint-config-prettier": "^8.10.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", - "jest-canvas-mock": "^2.5.1", + "jest-canvas-mock": "^2.5.2", "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.1.0", + "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, "version": "0.48.0" diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index c25cd52b2..46deeefc0 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -3,28 +3,28 @@ "version": "0.48.0", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.7.1", - "@codemirror/commands": "^6.2.4", - "@codemirror/language": "^6.7.0", - "@codemirror/lint": "^6.2.2", - "@codemirror/search": "^6.5.0", - "@codemirror/state": "^6.2.1", - "@codemirror/view": "^6.13.0", + "@codemirror/autocomplete": "^6.11.1", + "@codemirror/commands": "^6.3.2", + "@codemirror/language": "^6.9.3", + "@codemirror/lint": "^6.4.2", + "@codemirror/search": "^6.5.5", + "@codemirror/state": "^6.3.3", + "@codemirror/view": "^6.22.1", "@forevolve/bootstrap-dark": "^2.1.1", - "@fortawesome/fontawesome-svg-core": "6.4.0", - "@fortawesome/free-solid-svg-icons": "6.4.0", + "@fortawesome/fontawesome-svg-core": "6.5.1", + "@fortawesome/free-solid-svg-icons": "6.5.1", "@fortawesome/react-fontawesome": "0.2.0", - "@lezer/common": "^1.0.3", - "@lezer/highlight": "^1.1.6", - "@lezer/lr": "^1.3.6", + "@lezer/common": "^1.1.1", + "@lezer/highlight": "^1.2.0", + "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", "@prometheus-io/codemirror-promql": "0.48.0", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", - "downshift": "^7.6.0", + "downshift": "^7.6.2", "http-proxy-middleware": "^2.0.6", - "jquery": "^3.7.0", + "jquery": "^3.7.1", "jquery.flot.tooltip": "^0.9.0", "moment": "^2.29.4", "moment-timezone": "^0.5.43", @@ -38,7 +38,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.11.0", - "sass": "1.62.1", + "sass": "1.69.5", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, @@ -66,15 +66,15 @@ ], "devDependencies": { "@testing-library/react-hooks": "^7.0.2", - "@types/enzyme": "^3.10.13", - "@types/flot": "0.0.32", - "@types/jquery": "^3.5.16", - "@types/react": "^17.0.60", - "@types/react-copy-to-clipboard": "^5.0.4", - "@types/react-dom": "^17.0.20", + "@types/enzyme": "^3.10.18", + "@types/flot": "0.0.36", + "@types/jquery": "^3.5.29", + 
"@types/react": "^17.0.71", + "@types/react-copy-to-clipboard": "^5.0.7", + "@types/react-dom": "^17.0.25", "@types/react-router-dom": "^5.3.3", - "@types/sanitize-html": "^2.9.0", - "@types/sinon": "^10.0.15", + "@types/sanitize-html": "^2.9.5", + "@types/sinon": "^10.0.20", "@wojtekmaj/enzyme-adapter-react-17": "^0.8.0", "enzyme": "^3.11.0", "enzyme-to-json": "^3.6.2", @@ -94,6 +94,6 @@ } }, "optionalDependencies": { - "fsevents": "^2.3.2" + "fsevents": "^2.3.3" } }